71 changes: 52 additions & 19 deletions src/pytest_html/nextgen.py
@@ -66,11 +66,19 @@ def __init__(self, title, config):
"collectedItems": 0,
"runningState": "not_started",
"environment": {},
"tests": [],
"tests": defaultdict(list),
Contributor Author: Made a small performance improvement to avoid having to iterate through all tests; entries are now keyed by the original nodeid.
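
A minimal sketch of the difference, with hypothetical nodeids (before, finding a test's entries meant scanning a list; now it is a single dict lookup):

from collections import defaultdict

tests = defaultdict(list)
tests["test_a.py::test_one"].append({"testId": "test_a.py::test_one"})

# Old shape: scan every entry for a matching testId.
#   matches = [t for t in tests_list if t["testId"] == nodeid]
# New shape: direct lookup by the original nodeid.
matches = tests["test_a.py::test_one"]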

"resultsTableHeader": {},
"additionalSummary": defaultdict(list),
}

@property
def title(self):
return self._data["title"]

@title.setter
def title(self, title):
self._data["title"] = title

@property
def config(self):
return self._config
@@ -79,19 +87,33 @@ def config(self):
def data(self):
return self._data

def add_test(self, test):
self._data["tests"].append(test)

def set_data(self, key, value):
self._data[key] = value

@property
def title(self):
return self._data["title"]
def add_test(self, test_data, report):
# regardless of pass or fail we must add teardown logging to "call"
if report.when == "teardown":
self.update_test_log(report)

@title.setter
def title(self, title):
self._data["title"] = title
# passed "setup" and "teardown" are not added to the html
if report.when == "call" or _is_error(report):
processed_logs = _process_logs(report)
test_data["log"] = _handle_ansi(processed_logs)
self._data["tests"][report.nodeid].append(test_data)
return True

return False

def update_test_log(self, report):
log = []
for test in self._data["tests"][report.nodeid]:
if test["testId"] == report.nodeid:
for section in report.sections:
header, content = section
if "teardown" in header:
log.append(f" \n{header:-^80} ")
log.append(content)
test["log"] += _handle_ansi("\n".join(log))

def __init__(self, report_path, config, default_css="style.css"):
self._report_path = Path(os.path.expandvars(report_path)).expanduser()
@@ -269,7 +291,6 @@ def pytest_runtest_logreport(self, report):

data = {
"duration": report.duration,
"when": report.when,
}

test_id = report.nodeid
@@ -291,14 +312,11 @@ def pytest_runtest_logreport(self, report):
test_id += f"::{report.when}"
data["testId"] = test_id

# Order here matters!
log = report.longreprtext or report.capstdout or "No log output captured."
data["log"] = _handle_ansi(log)
data["result"] = _process_outcome(report)
data["extras"] = self._process_extras(report, test_id)

self._report.add_test(data)
self._generate_report()
if self._report.add_test(data, report):
self._generate_report()
Contributor Author: Only generate the report if we actually made a change (in this case, added a test).
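
A quick sketch of when a rebuild now happens, mirroring add_test() and the _is_error() helper added further down in this diff (phase and outcome strings are pytest's own):

def triggers_rebuild(when, outcome):
    # "call" rows are always added; "setup"/"teardown" rows only on error
    return when == "call" or (when in ("setup", "teardown") and outcome == "failed")

assert triggers_rebuild("call", "passed")
assert triggers_rebuild("teardown", "failed")
assert not triggers_rebuild("setup", "passed")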



class NextGenReport(BaseReport):
@@ -313,8 +331,6 @@ def __init__(self, report_path, config):

@property
def css(self):
# print("woot", Path(self._assets_path.name, "style.css"))
# print("waat", self._css_path.relative_to(self._report_path.parent))
return Path(self._assets_path.name, "style.css")

def _data_content(self, content, asset_name, *args, **kwargs):
@@ -392,8 +408,25 @@ def _process_css(default_css, extra_css):
return css


def _is_error(report):
return report.when in ["setup", "teardown"] and report.outcome == "failed"


def _process_logs(report):
log = []
if report.longreprtext:
log.append(report.longreprtext)
for section in report.sections:
header, content = section
log.append(f" \n{header:-^80} ")
log.append(content)
if not log:
log.append("No log output captured.")
return "\n".join(log)


def _process_outcome(report):
if report.when in ["setup", "teardown"] and report.outcome == "failed":
if _is_error(report):
return "Error"
if hasattr(report, "wasxfail"):
if report.outcome in ["passed", "failed"]:
2 changes: 1 addition & 1 deletion src/pytest_html/scripts/datamanager.js
@@ -3,7 +3,7 @@ const { getCollapsedCategory } = require('./storage.js')
class DataManager {
setManager(data) {
const collapsedCategories = [...getCollapsedCategory(), 'passed']
const dataBlob = { ...data, tests: data.tests.map((test, index) => ({
const dataBlob = { ...data, tests: Object.values(data.tests).flat().map((test, index) => ({
Contributor Author: This flattens the JSON data, removing the nodeid keys.
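
For illustration, the Python equivalent of Object.values(data.tests).flat(), using hypothetical data:

tests = {
    "test_a.py::test_one": [{"testId": "test_a.py::test_one", "result": "Passed"}],
    "test_b.py::test_two": [
        {"testId": "test_b.py::test_two", "result": "Failed"},
        {"testId": "test_b.py::test_two::teardown", "result": "Error"},
    ],
}
flattened = [entry for entries in tests.values() for entry in entries]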

...test,
id: `test_${index}`,
collapsed: collapsedCategories.includes(test.result.toLowerCase()),
1 change: 1 addition & 0 deletions src/pytest_html/scripts/dom.js
@@ -84,6 +84,7 @@ const dom = {
formattedDuration = formatDuration < 1 ? formattedDuration.ms : formattedDuration.formatted
const resultBody = templateResult.content.cloneNode(true)
resultBody.querySelector('tbody').classList.add(resultLower)
resultBody.querySelector('tbody').id = testId
Contributor Author: Added this so we can differentiate between logs by testId.
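
This is what the updated get_log() in testing/test_integration.py (below) relies on; a minimal BeautifulSoup sketch, assuming page is the parsed report:

log = page.select_one(".summary tbody[id$='test_logging'] div[class='log']")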

resultBody.querySelector('.col-result').innerText = result
resultBody.querySelector('.col-result').classList.add(`${collapsed ? 'expander' : 'collapser'}`)
resultBody.querySelector('.col-result').dataset.id = id
9 changes: 3 additions & 6 deletions src/pytest_html/scripts/main.js
@@ -29,8 +29,7 @@ const renderStatic = () => {
}

const renderContent = (tests) => {
const renderSet = tests.filter(({ when, result }) => when === 'call' || result === 'Error' )
const rows = renderSet.map(dom.getResultTBody)
const rows = tests.map(dom.getResultTBody)
const table = document.querySelector('#results-table')
removeChildren(table)
const tableHeader = dom.getListHeader(manager.renderData)
@@ -62,8 +61,6 @@ const renderContent = (tests) => {
}

const renderDerived = (tests, collectedItems, isFinished) => {
const renderSet = tests.filter(({ when, result }) => when === 'call' || result === 'Error')

const possibleResults = [
{ result: 'passed', label: 'Passed' },
{ result: 'skipped', label: 'Skipped' },
@@ -76,15 +73,15 @@ const renderDerived = (tests, collectedItems, isFinished) => {

const currentFilter = getVisible()
possibleResults.forEach(({ result, label }) => {
const count = renderSet.filter((test) => test.result.toLowerCase() === result).length
const count = tests.filter((test) => test.result.toLowerCase() === result).length
const input = document.querySelector(`input[data-test-result="${result}"]`)
document.querySelector(`.${result}`).innerText = `${count} ${label}`

input.disabled = !count
input.checked = currentFilter.includes(result)
})

const numberOfTests = renderSet.filter(({ result }) =>
const numberOfTests = tests.filter(({ result }) =>
['Passed', 'Failed', 'XPassed', 'XFailed'].includes(result)).length

if (isFinished) {
103 changes: 100 additions & 3 deletions testing/test_integration.py
@@ -31,7 +31,8 @@ def run(pytester, path="report.html", *args):
pytester.runpytest("-s", "--html", path, *args)

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
if os.environ.get("CI", False):
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
driver = webdriver.Remote(
command_executor="http://127.0.0.1:4444", options=chrome_options
@@ -90,9 +91,12 @@ def get_text(page, selector):
return get_element(page, selector).string


def get_log(page):
def get_log(page, test_id=None):
# TODO(jim) move to get_text (use .contents)
log = get_element(page, ".summary div[class='log']")
if test_id:
log = get_element(page, f".summary tbody[id$='{test_id}'] div[class='log']")
else:
log = get_element(page, ".summary div[class='log']")
all_text = ""
for text in log.strings:
all_text += text
@@ -527,3 +531,96 @@ def test_pass(): pass
)
page = run(pytester)
assert_results(page, passed=1)


class TestLogCapturing:
LOG_LINE_REGEX = r"\s+this is {}"

@pytest.fixture
def log_cli(self, pytester):
pytester.makeini(
"""
[pytest]
log_cli = 1
log_cli_level = INFO
log_cli_date_format = %Y-%m-%d %H:%M:%S
log_cli_format = %(asctime)s %(levelname)s: %(message)s
"""
)

@pytest.fixture
def test_file(self):
return """
import pytest
import logging
@pytest.fixture
def setup():
logging.info("this is setup")
{setup}
yield
logging.info("this is teardown")
{teardown}

def test_logging(setup):
logging.info("this is test")
assert {assertion}
"""

@pytest.mark.usefixtures("log_cli")
def test_all_pass(self, test_file, pytester):
pytester.makepyfile(test_file.format(setup="", teardown="", assertion=True))
page = run(pytester)
assert_results(page, passed=1)

log = get_log(page)
for when in ["setup", "test", "teardown"]:
assert_that(log).matches(self.LOG_LINE_REGEX.format(when))

@pytest.mark.usefixtures("log_cli")
def test_setup_error(self, test_file, pytester):
pytester.makepyfile(
test_file.format(setup="error", teardown="", assertion=True)
)
page = run(pytester)
assert_results(page, error=1)

log = get_log(page)
assert_that(log).matches(self.LOG_LINE_REGEX.format("setup"))
assert_that(log).does_not_match(self.LOG_LINE_REGEX.format("test"))
assert_that(log).does_not_match(self.LOG_LINE_REGEX.format("teardown"))

@pytest.mark.usefixtures("log_cli")
def test_test_fails(self, test_file, pytester):
pytester.makepyfile(test_file.format(setup="", teardown="", assertion=False))
page = run(pytester)
assert_results(page, failed=1)

log = get_log(page)
for when in ["setup", "test", "teardown"]:
assert_that(log).matches(self.LOG_LINE_REGEX.format(when))

@pytest.mark.usefixtures("log_cli")
@pytest.mark.parametrize(
"assertion, result", [(True, {"passed": 1}), (False, {"failed": 1})]
)
def test_teardown_error(self, test_file, pytester, assertion, result):
pytester.makepyfile(
test_file.format(setup="", teardown="error", assertion=assertion)
)
page = run(pytester)
assert_results(page, error=1, **result)

for test_name in ["test_logging", "test_logging::teardown"]:
log = get_log(page, test_name)
for when in ["setup", "test", "teardown"]:
assert_that(log).matches(self.LOG_LINE_REGEX.format(when))

def test_no_log(self, test_file, pytester):
pytester.makepyfile(test_file.format(setup="", teardown="", assertion=True))
page = run(pytester)
assert_results(page, passed=1)

log = get_log(page, "test_logging")
assert_that(log).contains("No log output captured.")
for when in ["setup", "test", "teardown"]:
assert_that(log).does_not_match(self.LOG_LINE_REGEX.format(when))