diff --git a/.drone.yml b/.drone.yml
index 4abdb59c0..9eb8b9b5d 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -29,8 +29,7 @@ steps:
- black --config black.toml --check ./rdflib || true
- flake8 --exit-zero rdflib
- mypy --show-error-context --show-error-codes rdflib
- - PYTHONWARNINGS=default nosetests --with-timer --timer-top-n 42 --with-coverage --cover-tests --cover-package=rdflib
- - coverage report --skip-covered
+ - pytest --cov
- coveralls
---
@@ -49,9 +48,9 @@ steps:
- pip install --default-timeout 60 -r requirements.txt
- pip install --default-timeout 60 -r requirements.dev.txt
- python setup.py install
- - black --config black.toml --check ./rdflib | true
+ - black --config black.toml --check ./rdflib || true
- flake8 --exit-zero rdflib
- - PYTHONWARNINGS=default nosetests --with-timer --timer-top-n 42
+ - pytest
---
kind: pipeline
@@ -69,6 +68,6 @@ steps:
- pip install --default-timeout 60 -r requirements.txt
- pip install --default-timeout 60 -r requirements.dev.txt
- python setup.py install
- - black --config black.toml --check ./rdflib | true
+ - black --config black.toml --check ./rdflib || true
- flake8 --exit-zero rdflib
- - PYTHONWARNINGS=default nosetests --with-timer --timer-top-n 42
+ - pytest
diff --git a/.gitignore b/.gitignore
index 118ec0bdd..13236940b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,21 +1,153 @@
RDFLib.sublime-project
/docs/_build/
-nosetests.xml
RDFLib.sublime-workspace
-*.egg-info/
coverage/
-dist/
-__pycache__/
-*.pyc
/.hgtags
/.hgignore
build/
-/.coverage
-/.tox/
/docs/draft/
*~
test_reports/*latest.ttl
# PyCharm
.idea/
prepare_changelog.sh
-.venv/
+#### vimdiff <(curl --silent -L https://github.com/github/gitignore/raw/master/Python.gitignore) .gitignore
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 0e28b1d40..000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,47 +0,0 @@
-# http://travis-ci.org/#!/RDFLib/rdflib
-os: linux
-arch:
- - amd64
- - ppc64le
-language: python
-branches:
- only:
- # only build master and release branches (merge request are built anyhow)
- - master
- - /^\d+\.\d+\.\d+(-.*)?$/
-git:
- depth: 3
-
-python:
- - 3.6
- - 3.7
- - 3.8
-
-jobs:
- include:
- - python: 3.8
- dist: focal
-
-before_install:
- - pip install -U setuptools pip # seems travis comes with a too old setuptools for html5lib
- - bash .travis.fuseki_install_optional.sh
-
-install:
- - pip install --default-timeout 60 -r requirements.txt
- - pip install --default-timeout 60 -r requirements.dev.txt
- - pip install --default-timeout 60 coveralls && export HAS_COVERALLS=1
- - python setup.py install
-
-before_script:
- - flake8 --exit-zero rdflib
-
-script:
- - PYTHONWARNINGS=default nosetests --with-timer --timer-top-n 42 --with-coverage --cover-tests --cover-package=rdflib
- - coverage report
-
-after_success:
- - if [[ $HAS_COVERALLS ]] ; then coveralls ; fi
-
-notifications:
- irc:
- channels: "chat.freenode.net#rdflib"
diff --git a/MANIFEST.in b/MANIFEST.in
index d48534bd1..1eeed9fe9 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -6,7 +6,6 @@ include ez_setup.py
include skiptests.list
recursive-include rdflib *.py
recursive-include examples *.py
-include run_tests.py
graft test
graft docs
prune docs/_build
diff --git a/README.md b/README.md
index 4ccd176a1..42309761d 100644
--- a/README.md
+++ b/README.md
@@ -165,20 +165,18 @@ Multiple other projects are contained within the RDFlib "family", see `_:
+Run tests with `pytest `_:
.. code-block:: bash
- $ pip install nose
- $ python run_tests.py
- $ python run_tests.py --attr known_issue # override attr in setup.cfg to run only tests marked with "known_issue"
- $ python run_tests.py --attr \!known_issue # runs all tests (including "slow" and "non_core") except those with known issues
- $ python run_tests.py --attr slow,!known_issue # comma separate if you want to specify more than one attr
- $ python run_tests.py --attr known_issue=None # use =None instead of \! if you keep forgetting to escape the ! in shell commands ;)
+ $ pip install -r requirements.txt -r requirements.dev.txt
+ $ pytest
-Specific tests can either be run by module name or file name. For example::
- $ python run_tests.py --tests rdflib.graph
- $ python run_tests.py --tests test/test_graph.py
+Specific tests can be run by file name. For example:
+
+.. code-block:: bash
+
+ $ pytest test/test_graph.py
Running static checks
---------------------
diff --git a/rdflib/extras/external_graph_libs.py b/rdflib/extras/external_graph_libs.py
index 164c210a8..69d42b29f 100644
--- a/rdflib/extras/external_graph_libs.py
+++ b/rdflib/extras/external_graph_libs.py
@@ -348,14 +348,3 @@ def rdflib_to_graphtool(
for epn, eprop in eprops:
eprop[e] = tmp_props[epn]
return g
-
-
-if __name__ == "__main__":
- import sys
- import logging.config
-
- logging.basicConfig(level=logging.DEBUG)
-
- import nose
-
- nose.run(argv=[sys.argv[0], sys.argv[0], "-v", "--without-doctest"])
diff --git a/rdflib/plugins/parsers/notation3.py b/rdflib/plugins/parsers/notation3.py
index deca4e9f6..cfdc8568d 100755
--- a/rdflib/plugins/parsers/notation3.py
+++ b/rdflib/plugins/parsers/notation3.py
@@ -1877,7 +1877,7 @@ def hexify(ustr):
corresponding to the given UTF8 string
>>> hexify("http://example/a b")
- %(b)s'http://example/a%%20b'
+ b'http://example/a%20b'
"""
# s1=ustr.encode('utf-8')
diff --git a/requirements.dev.txt b/requirements.dev.txt
index 27181725a..3c361ef96 100644
--- a/requirements.dev.txt
+++ b/requirements.dev.txt
@@ -1,12 +1,14 @@
-sphinx
-sphinxcontrib-apidoc
-nose==1.3.7
-nose-timer
-coverage
-flake8
-doctest-ignore-unicode==0.1.2
berkeleydb
black==21.9b0
+coverage
+doctest-ignore-unicode==0.1.2
+flake8
flake8-black
+html5lib
mypy
+pytest
+pytest-cov
+pytest-subtests
+sphinx
+sphinxcontrib-apidoc
types-setuptools
diff --git a/run_tests.py b/run_tests.py
index e80665d09..c8db26375 100755
--- a/run_tests.py
+++ b/run_tests.py
@@ -1,92 +1,48 @@
#!/usr/bin/env python
"""
-Testing with Nose
+Testing with pytest
=================
-This test runner uses Nose for test discovery and running. It uses the argument
-spec of Nose, but with some options pre-set. To begin with, make sure you have
-Nose installed, e.g.:
+This test runner uses pytest for test discovery and running. It uses the argument
+spec of pytest, but with some options pre-set. To begin with, make sure you have
+pytest installed, e.g.:
- $ pip install nose
+ $ pip install pytest
To run the tests, use:
$ ./run_tests.py
-If you supply attributes, the default ones defined in ``DEFAULT_ATTRS`` will be
-ignored. So to run e.g. all tests marked ``slowtest`` or ``non_standard_dep``,
-do:
-
- $ ./run_tests.py -a slowtest,non_standard_dep
For more details check .
Coverage
========
-If ``coverage.py`` is placed in $PYTHONPATH, it can be used to create coverage
-information (using the built-in coverage plugin of Nose) if the default
-option "--with-coverage" is supplied (which also enables some additional
-coverage options).
+If ``pytest-cov`` is placed in $PYTHONPATH, it can be used to create coverage
+information if the "--cov" option is supplied.
-See for details.
+See for details.
"""
-NOSE_ARGS = [
- "--with-doctest",
- "--doctest-extension=.doctest",
- "--doctest-tests",
- # '--with-EARL',
-]
-
-COVERAGE_EXTRA_ARGS = [
- "--cover-package=rdflib",
- "--cover-inclusive",
-]
-
-DEFAULT_LOCATION = "--where=./"
-
-DEFAULT_ATTRS = [] # ['!known_issue', '!sparql']
-
-DEFAULT_DIRS = ["test", "rdflib"]
-
+import json
+import sys
if __name__ == "__main__":
-
- from sys import argv, exit, stderr
-
try:
- import nose
+ import pytest
except ImportError:
print(
"""\
- Requires Nose. Try:
+ Requires pytest. Try:
- $ pip install nose
+ $ pip install pytest
Exiting. """,
- file=stderr,
+ file=sys.stderr,
)
exit(1)
- if "--with-coverage" in argv:
- try:
- import coverage
- except ImportError:
- print("No coverage module found, skipping code coverage.", file=stderr)
- argv.remove("--with-coverage")
- else:
- NOSE_ARGS += COVERAGE_EXTRA_ARGS
-
- if True not in [a.startswith("-a") or a.startswith("--attr=") for a in argv]:
- argv.append("--attr=" + ",".join(DEFAULT_ATTRS))
-
- if not [a for a in argv[1:] if not a.startswith("-")]:
- argv += DEFAULT_DIRS # since nose doesn't look here by default..
-
- if not [a for a in argv if a.startswith("--where=")]:
- argv += [DEFAULT_LOCATION]
-
- finalArgs = argv + NOSE_ARGS
- print("Running nose with:", " ".join(finalArgs[1:]))
- nose.run_exit(argv=finalArgs)
+ finalArgs = sys.argv[1:]
+ print("Running pytest with:", json.dumps(finalArgs))
+ sys.exit(pytest.main(args=finalArgs))
diff --git a/run_tests.sh b/run_tests.sh
deleted file mode 100755
index ddf2f7b44..000000000
--- a/run_tests.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-cd /rdflib
-pip install -e .
-
-test_command="nosetests --with-timer --timer-top-n 42 --with-coverage --cover-tests --cover-package=rdflib"
-echo "Running tests..."
-echo "Test command: $test_command"
-$test_command
\ No newline at end of file
diff --git a/run_tests_with_coverage_report.sh b/run_tests_with_coverage_report.sh
deleted file mode 100755
index a5383aaf6..000000000
--- a/run_tests_with_coverage_report.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-
-cd /rdflib
-pip install -e .
-
-test_command="nosetests --with-timer --timer-top-n 42 --with-coverage --cover-tests --cover-package=rdflib --cover-html"
-echo "Running tests..."
-echo "Test command: $test_command"
-$test_command
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
index 798121d68..4c3daf6ce 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,14 +1,6 @@
[options.package_data]
rdflib = py.typed
-[nosetests]
-attr=!known_issue,!non_core,!performancetest
-verbosity=1
-with-doctest=1
-with-doctest-ignore-unicode=1
-doctest-options=+IGNORE_UNICODE
-exclude=rdflib.plugins.sparql.paths|rdflib.extras.external_graph_libs
-
[flake8]
ignore = W806
max-line-length = 88
@@ -16,8 +8,7 @@ exclude = host,extras,transform,rdfs,pyRdfa,sparql,results,pyMicrodata
[coverage:run]
branch = True
-#source = rdflib,build/src/rdflib # specified in .travis.yml for different envs
-omit = */site-packages/*
+source = rdflib
[coverage:report]
# Regexes for lines to exclude from consideration
@@ -36,3 +27,17 @@ warn_unused_configs = True
ignore_missing_imports = True
disallow_subclassing_any = False
warn_unreachable = True
+
+[tool:pytest]
+addopts =
+ --doctest-modules
+ --ignore=test/translate_algebra
+ --ignore=admin
+ --ignore=rdflib/extras/external_graph_libs.py
+ --ignore-glob=docs/*.py
+doctest_optionflags = ALLOW_UNICODE
+filterwarnings =
+ # The below warning is a consequence of how pytest doctest detects mocks and how DefinedNamespace behaves when an undefined attribute is being accessed.
+ ignore:Code. pytest_mock_example_attribute_that_shouldnt_exist is not defined in namespace .*:UserWarning
+ # The below warning is a consequence of how pytest detects fixtures and how DefinedNamespace behaves when an undefined attribute is being accessed.
+ ignore:Code. _pytestfixturefunction is not defined in namespace .*:UserWarning
diff --git a/setup.py b/setup.py
index 8255e3a1f..400ef1051 100644
--- a/setup.py
+++ b/setup.py
@@ -6,18 +6,20 @@
from setuptools import setup, find_packages
kwargs = {}
-kwargs["install_requires"] = ["isodate", "pyparsing", "setuptools", "importlib-metadata; python_version < '3.8.0'"]
+kwargs["install_requires"] = [
+ "isodate",
+ "pyparsing",
+ "setuptools",
+ "importlib-metadata; python_version < '3.8.0'",
+]
kwargs["tests_require"] = [
+ "berkeleydb",
"html5lib",
"networkx",
- "nose==1.3.7",
- "nose-timer",
- "coverage",
- "black==21.9b0",
- "flake8",
- "doctest-ignore-unicode==0.1.2",
+ "pytest",
+ "pytest-cov",
+ "pytest-subtests",
]
-kwargs["test_suite"] = "nose.collector"
kwargs["extras_require"] = {
"html": ["html5lib"],
"tests": kwargs["tests_require"],
diff --git a/test/README b/test/README
deleted file mode 100644
index 03b0e81a6..000000000
--- a/test/README
+++ /dev/null
@@ -1,48 +0,0 @@
-
-Various unit tests for rdflib
-
-Graph tests
-===========
-
-(Graphs are mostly tested through the store tests - detailed below)
-
-test_aggregate_graphs - special tests for the ReadOnlyGraphAggregate class
-
-Store tests
-===========
-
-These tests test all stores plugins that are registered, i.e. you may test more than just core rdflib:
-
-test_graph - all stores
-test_graph_context - only context aware stores
-test_graph_formula - only formula aware stores
-
-
-Syntax tests
-============
-
-test_n3 - test misc n3 features
-test_n3_suite - n3 test-cases in n3/*
-
-test_nt_misc - test misc nt features
-
-test_rdfxml - rdf-wg RDF/XML test-cases in rdf/*
-
-test_trix - trix test-cases in trix/*
-
-test_nquads - nquads test-cases in nquads/*
-
-test_roundtrip - roundtrip testing of all files nt/*
- All parser/serializer pairs that are registered are tested, i.e you may test more than just core rdflib.
-
-Misc tests
-==========
-
-test_finalnewline - test that all serializers produce output with a final newline
-
-test_conneg - test content negotiation when reading remote graphs
-
-
-
-
-
diff --git a/test/README.rst b/test/README.rst
new file mode 100644
index 000000000..4ed6510a4
--- /dev/null
+++ b/test/README.rst
@@ -0,0 +1,105 @@
+
+Various unit tests for rdflib
+
+Graph tests
+===========
+
+(Graphs are mostly tested through the store tests - detailed below)
+
+test_aggregate_graphs - special tests for the ReadOnlyGraphAggregate class
+
+Store tests
+===========
+
+These tests test all stores plugins that are registered, i.e. you may test more than just core rdflib:
+
+test_graph - all stores
+test_graph_context - only context aware stores
+test_graph_formula - only formula aware stores
+
+
+Syntax tests
+============
+
+test_n3 - test misc n3 features
+test_n3_suite - n3 test-cases in n3/*
+
+test_nt_misc - test misc nt features
+
+test_rdfxml - rdf-wg RDF/XML test-cases in rdf/*
+
+test_trix - trix test-cases in trix/*
+
+test_nquads - nquads test-cases in nquads/*
+
+test_roundtrip - roundtrip testing of all files nt/*
+ All parser/serializer pairs that are registered are tested, i.e you may test more than just core rdflib.
+
+Misc tests
+==========
+
+test_finalnewline - test that all serializers produce output with a final newline
+
+test_conneg - test content negotiation when reading remote graphs
+
+
+EARL Test Reports
+=================
+
+EARL test reports can be generated using the EARL reporter plugin from ``earl.py``.
+
+When this plugin is enabled it will create an ``earl:Assertion`` for every test that has a ``rdf_test_uri`` parameter which can be either a string or an ``URIRef``.
+
+To enable the EARL reporter plugin an output file path must be supplied to pytest with ``--earl-report``. The report will be written to this location in turtle format.
+
+Some examples of generating test reports:
+
+.. code-block:: bash
+
+ pytest \
+ --earl-asserter-homepage=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-jsonld-local.ttl \
+ test/jsonld/test_localsuite.py
+
+ pytest \
+ --earl-asserter-homepage=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-jsonld-v1.1.ttl \
+ test/jsonld/test_onedotone.py
+
+ pytest \
+ --earl-asserter-homepage=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-jsonld-v1.0.ttl \
+ test/jsonld/test_testsuite.py
+
+ pytest \
+ --earl-asserter-homepage=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-sparql.ttl \
+ test/test_dawg.py
+
+ pytest \
+ --earl-asserter-homepage=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-nquads.ttl \
+ test/test_nquads_w3c.py
+
+ pytest \
+ --earl-asserter-homepage=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-nt.ttl \
+ test/test_nt_w3c.py
+
+ pytest \
+ --earl-asserter-uri=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-trig.ttl \
+ test/test_trig_w3c.py
+
+ pytest \
+ --earl-asserter-uri=http://example.com \
+ --earl-asserter-name 'Example Name' \
+ --earl-report=/var/tmp/earl/earl-turtle.ttl \
+ test/test_turtle_w3c.py
diff --git a/test/conftest.py b/test/conftest.py
new file mode 100644
index 000000000..d34aeb057
--- /dev/null
+++ b/test/conftest.py
@@ -0,0 +1,3 @@
+from .earl import EarlReporter
+
+pytest_plugins = [EarlReporter.__module__]
diff --git a/test/earl.py b/test/earl.py
index 54df7d3e9..e211bd297 100644
--- a/test/earl.py
+++ b/test/earl.py
@@ -1,48 +1,287 @@
+"""
+PYTEST_DONT_REWRITE
+"""
+import enum
+import logging
from datetime import datetime
+from pathlib import Path
+from test.manifest import RDFT
+from typing import TYPE_CHECKING, Generator, Optional, Tuple, cast
-from rdflib import Graph, URIRef, Literal, BNode, RDF, Namespace
-from rdflib.namespace import FOAF, DOAP, DC
+import pytest
-from nose.tools import nottest
+from pytest import Item
-EARL = Namespace("http://www.w3.org/ns/earl#")
+from rdflib import RDF, BNode, Graph, Literal, Namespace, URIRef
+from rdflib.namespace import DC, DOAP, FOAF, DefinedNamespace
+from rdflib.term import Node
-report = Graph()
+if TYPE_CHECKING:
+ from _pytest.main import Session
+ from _pytest.python import CallSpec2
+ from _pytest.reports import TestReport
+ from _pytest.runner import CallInfo
+ from pluggy._result import _Result
-report.bind("foaf", FOAF)
-report.bind("earl", EARL)
-report.bind("doap", DOAP)
-report.bind("dc", DC)
-me = URIRef("http://gromgull.net/me")
-report.add((me, RDF.type, FOAF.Person))
-report.add((me, FOAF.homepage, URIRef("http://gromgull.net")))
-report.add((me, FOAF.name, Literal("Gunnar Aastrand Grimnes")))
+class EARL(DefinedNamespace):
+ _fail = True
+ _NS = Namespace("http://www.w3.org/ns/earl#")
-rdflib = URIRef("https://github.com/RDFLib/rdflib")
+ assertedBy: URIRef # assertor of an assertion
+ Assertion: URIRef # a statement that embodies the results of a test
+ Assertor: URIRef # an entity such as a person, a software tool, an organization, or any other grouping that carries out a test collectively
+ automatic: URIRef # where the test was carried out automatically by the software tool and without any human intervention
+ CannotTell: URIRef # the class of outcomes to denote an undetermined outcome
+ cantTell: URIRef # it is unclear if the subject passed or failed the test
+ failed: URIRef # the subject failed the test
+ Fail: URIRef # the class of outcomes to denote failing a test
+ inapplicable: URIRef # the test is not applicable to the subject
+ info: URIRef # additional warnings or error messages in a human-readable form
+ mainAssertor: URIRef # assertor that is primarily responsible for performing the test
+ manual: URIRef # where the test was carried out by human evaluators
+ mode: URIRef # mode in which the test was performed
+ NotApplicable: URIRef # the class of outcomes to denote the test is not applicable
+ NotTested: URIRef # the class of outcomes to denote the test has not been carried out
+ outcome: URIRef # outcome of performing the test
+ OutcomeValue: URIRef # a discrete value that describes a resulting condition from carrying out the test
+ passed: URIRef # the subject passed the test
+ Pass: URIRef # the class of outcomes to denote passing a test
+ pointer: URIRef # location within a test subject that are most relevant to a test result
+ result: URIRef # result of an assertion
+ semiAuto: URIRef # where the test was partially carried out by software tools, but where human input or judgment was still required to decide or help decide the outcome of the test
+ Software: URIRef # any piece of software such as an authoring tool, browser, or evaluation tool
+ subject: URIRef # test subject of an assertion
+ TestCase: URIRef # an atomic test, usually one that is a partial test for a requirement
+ TestCriterion: URIRef # a testable statement, usually one that can be passed or failed
+ TestMode: URIRef # describes how a test was carried out
+ TestRequirement: URIRef # a higher-level requirement that is tested by executing one or more sub-tests
+ TestResult: URIRef # the actual result of performing the test
+ TestSubject: URIRef # the class of things that have been tested against some test criterion
+ test: URIRef # test criterion of an assertion
+ undisclosed: URIRef # where the exact testing process is undisclosed
+ unknownMode: URIRef # where the testing process is unknown or undetermined
+ untested: URIRef # the test has not been carried out
-report.add((rdflib, DOAP.homepage, rdflib))
-report.add((rdflib, DOAP.name, Literal("rdflib")))
-report.add((rdflib, DOAP.developer, me))
-report.add((rdflib, RDF.type, DOAP.Project))
-now = Literal(datetime.now())
+class EarlReport:
+ """
+ This is a helper class for building an EARL report graph.
+ """
+ def __init__(
+ self,
+ asserter_uri: Optional[str] = None,
+ asserter_homepage: Optional[str] = None,
+ asserter_name: Optional[str] = None,
+ ) -> None:
+ self.graph = graph = Graph()
+ graph.bind("foaf", FOAF)
+ graph.bind("earl", EARL)
+ graph.bind("doap", DOAP)
+ graph.bind("dc", DC)
-@nottest
-def add_test(test, res, info=None):
- a = BNode()
- report.add((a, RDF.type, EARL.Assertion))
- report.add((a, EARL.assertedBy, me))
- report.add((a, EARL.test, test))
- report.add((a, EARL.subject, rdflib))
+ self.asserter: Node
+ asserter: Node
+ if asserter_uri is not None or asserter_homepage is not None:
+ self.asserter = asserter = URIRef(
+ asserter_homepage if asserter_uri is None else asserter_uri
+ )
+ graph.add((asserter, RDF.type, FOAF.Person))
+ else:
+ self.asserter = asserter = BNode()
+ graph.add((asserter, RDF.type, FOAF.Person))
+ if asserter_name:
+ graph.add((asserter, FOAF.name, Literal(asserter_name)))
+ if asserter_homepage:
+ graph.add((asserter, FOAF.homepage, URIRef(asserter_homepage)))
- report.add((a, DC.date, now))
+ self.project = project = URIRef("https://github.com/RDFLib/rdflib")
- r = BNode()
- report.add((a, EARL.result, r))
- report.add((r, RDF.type, EARL.TestResult))
+ graph.add((project, DOAP.homepage, project))
+ graph.add((project, DOAP.name, Literal("RDFLib")))
+ graph.add((project, RDF.type, DOAP.Project))
+ graph.add((project, DOAP["programming-language"], Literal("Python")))
+ graph.add(
+ (
+ project,
+ DOAP.description,
+ Literal(
+ (
+ "RDFLib is a Python library for working with RDF, "
+ "a simple yet powerful language for representing information."
+ ),
+ lang="en",
+ ),
+ )
+ )
- report.add((r, EARL.outcome, EARL[res]))
- if info:
- report.add((r, EARL.info, Literal(info)))
+ self.now = Literal(datetime.now())
+
+ def add_test_outcome(
+ self, test_id: URIRef, outcome: URIRef, info: Optional[Literal] = None
+    ) -> Tuple[Graph, Node]:
+ graph = self.graph
+ assertion = BNode()
+ graph.add((assertion, RDF.type, EARL.Assertion))
+ graph.add((assertion, EARL.test, test_id))
+ graph.add((assertion, EARL.subject, self.project))
+ graph.add((assertion, EARL.mode, EARL.automatic))
+ if self.asserter:
+ graph.add((assertion, EARL.assertedBy, self.asserter))
+
+ result = BNode()
+ graph.add((assertion, EARL.result, result))
+ graph.add((result, RDF.type, EARL.TestResult))
+ graph.add((result, DC.date, self.now))
+ graph.add((result, EARL.outcome, outcome))
+ if info:
+ graph.add((result, EARL.info, info))
+
+ return graph, result
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group.addoption(
+ "--earl-report",
+ action="store",
+ dest="earl_path",
+ metavar="path",
+ default=None,
+ help="create EARL report file at given path.",
+ )
+
+ group.addoption(
+ "--earl-asserter-uri",
+ action="store",
+ dest="earl_asserter_uri",
+ metavar="uri",
+ default=None,
+ help="Set the EARL asserter URI, defaults to the asserter homepage if not set.",
+ )
+
+ group.addoption(
+ "--earl-asserter-homepage",
+ action="store",
+ dest="earl_asserter_homepage",
+ metavar="URL",
+ default=None,
+ help="Set the EARL asserter homepage.",
+ )
+
+ group.addoption(
+ "--earl-asserter-name",
+ action="store",
+ dest="earl_asserter_name",
+ metavar="name",
+ default=None,
+ help="Set the EARL asserter name.",
+ )
+
+
+def pytest_configure(config):
+ earl_path = config.option.earl_path
+ if earl_path:
+ config._earl = EarlReporter(
+ Path(earl_path),
+ EarlReport(
+ asserter_uri=config.option.earl_asserter_uri,
+ asserter_name=config.option.earl_asserter_name,
+ asserter_homepage=config.option.earl_asserter_homepage,
+ ),
+ )
+ config.pluginmanager.register(config._earl)
+
+
+def pytest_unconfigure(config):
+    earl = getattr(config, "_earl", None)
+ if earl:
+ del config._earl
+ config.pluginmanager.unregister(earl)
+
+
+# https://docs.pytest.org/en/latest/reference.html#pytest.hookspec.pytest_runtest_protocol
+
+
+class TestResult(enum.Enum):
+ PASS = enum.auto()
+ FAIL = enum.auto()
+ ERROR = enum.auto()
+ SKIP = enum.auto()
+
+
+class TestReportHelper:
+ @classmethod
+ def get_rdf_test_uri(cls, report: "TestReport") -> Optional[URIRef]:
+ return next(
+ (
+ cast(URIRef, item[1])
+ for item in report.user_properties
+ if item[0] == RDFT.Test
+ ),
+ None,
+ )
+
+
+class EarlReporter:
+ """
+ This class is a pytest plugin that will write a EARL report with results for
+ every pytest which has a rdf_test_uri parameter that is a string or an
+ URIRef.
+ """
+
+ def __init__(self, output_path: Path, report: Optional[EarlReport] = None) -> None:
+ self.report = report if report is not None else EarlReport()
+ self.output_path = output_path
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_makereport(
+ self, item: Item, call: "CallInfo[None]"
+ ) -> Generator[None, "_Result", None]:
+ result = yield
+
+ report: "TestReport" = result.get_result()
+
+ if not hasattr(item, "callspec"):
+ return
+ callspec: "CallSpec2" = getattr(item, "callspec")
+ rdf_test_uri = callspec.params.get("rdf_test_uri")
+ if rdf_test_uri is None:
+ return
+ if not isinstance(rdf_test_uri, URIRef) and not isinstance(rdf_test_uri, str):
+ logging.warning("rdf_test_uri parameter is not a URIRef or a str")
+ return
+ if not isinstance(rdf_test_uri, URIRef):
+ rdf_test_uri = URIRef(rdf_test_uri)
+
+ report.user_properties.append((RDFT.Test, rdf_test_uri))
+
+ def append_result(self, report: "TestReport", test_result: TestResult) -> None:
+ rdf_test_uri = TestReportHelper.get_rdf_test_uri(report)
+ if rdf_test_uri is None:
+ # No RDF test
+ return
+ if test_result is TestResult.PASS:
+ self.report.add_test_outcome(rdf_test_uri, EARL.passed)
+ elif test_result is TestResult.FAIL:
+ self.report.add_test_outcome(rdf_test_uri, EARL.failed)
+ elif (test_result) is TestResult.SKIP:
+ self.report.add_test_outcome(rdf_test_uri, EARL.untested)
+ else:
+ self.report.add_test_outcome(rdf_test_uri, EARL.cantTell)
+
+ def pytest_runtest_logreport(self, report: "TestReport") -> None:
+ if report.passed:
+ if report.when == "call": # ignore setup/teardown
+ self.append_result(report, TestResult.PASS)
+ elif report.failed:
+ if report.when == "call": # ignore setup/teardown
+ self.append_result(report, TestResult.FAIL)
+ else:
+ self.append_result(report, TestResult.ERROR)
+ elif report.skipped:
+ self.append_result(report, TestResult.SKIP)
+
+ def pytest_sessionfinish(self, session: "Session"):
+ self.report.graph.serialize(format="turtle", destination=self.output_path)
diff --git a/test/jsonld/test_compaction.py b/test/jsonld/test_compaction.py
index 7ea409a92..e3db6fd4b 100644
--- a/test/jsonld/test_compaction.py
+++ b/test/jsonld/test_compaction.py
@@ -3,6 +3,8 @@
import re
import json
import itertools
+
+import pytest
from rdflib import Graph
from rdflib.plugin import register, Serializer
@@ -252,6 +254,6 @@ def sort_graph(data):
data["@graph"].sort(key=lambda node: node.get("@id"))
-def test_cases():
- for data, expected in cases:
- yield run, data, expected
+@pytest.mark.parametrize("data, expected", cases)
+def test_cases(data, expected):
+ run(data, expected)
diff --git a/test/jsonld/test_context.py b/test/jsonld/test_context.py
index a4d3d772e..2bff46734 100644
--- a/test/jsonld/test_context.py
+++ b/test/jsonld/test_context.py
@@ -136,9 +136,11 @@ def test_prefix_like_vocab():
def _mock_source_loader(f):
@wraps(f)
def _wrapper():
- context.source_to_json = SOURCES.get
- f()
- context.source_to_json = _source_to_json
+ try:
+ context.source_to_json = SOURCES.get
+ f()
+ finally:
+ context.source_to_json = _source_to_json
return _wrapper
diff --git a/test/jsonld/test_localsuite.py b/test/jsonld/test_localsuite.py
index 307016538..fd51586af 100644
--- a/test/jsonld/test_localsuite.py
+++ b/test/jsonld/test_localsuite.py
@@ -1,6 +1,10 @@
import os
from os import environ, chdir, getcwd, path as p
import json
+
+import pytest
+
+from rdflib.term import URIRef
from . import runner
@@ -24,18 +28,32 @@ def read_manifest():
yield category, name, inputpath, expectedpath, context, options
-def test_suite():
+def get_test_suite_cases():
+ for cat, num, inputpath, expectedpath, context, options in read_manifest():
+ if inputpath.endswith(".jsonld"): # toRdf
+ if expectedpath.endswith(".jsonld"): # compact/expand/flatten
+ func = runner.do_test_json
+ else: # toRdf
+ func = runner.do_test_parser
+ else: # fromRdf
+ func = runner.do_test_serializer
+ rdf_test_uri = URIRef("{0}{1}-manifest.jsonld#t{2}".format(
+ TC_BASE, cat, num
+ ))
+ yield rdf_test_uri, func, TC_BASE, cat, num, inputpath, expectedpath, context, options
+
+
+@pytest.fixture(scope="module", autouse=True)
+def global_state():
old_cwd = getcwd()
chdir(testsuite_dir)
- try:
- for cat, num, inputpath, expectedpath, context, options in read_manifest():
- if inputpath.endswith(".jsonld"): # toRdf
- if expectedpath.endswith(".jsonld"): # compact/expand/flatten
- func = runner.do_test_json
- else: # toRdf
- func = runner.do_test_parser
- else: # fromRdf
- func = runner.do_test_serializer
- yield func, TC_BASE, cat, num, inputpath, expectedpath, context, options
- finally:
- chdir(old_cwd)
+ yield
+ chdir(old_cwd)
+
+
+@pytest.mark.parametrize(
+ "rdf_test_uri, func, suite_base, cat, num, inputpath, expectedpath, context, options",
+ get_test_suite_cases(),
+)
+def test_suite(rdf_test_uri: URIRef, func, suite_base, cat, num, inputpath, expectedpath, context, options):
+ func(suite_base, cat, num, inputpath, expectedpath, context, options)
diff --git a/test/jsonld/test_onedotone.py b/test/jsonld/test_onedotone.py
index 884e6f235..94b74d7c6 100644
--- a/test/jsonld/test_onedotone.py
+++ b/test/jsonld/test_onedotone.py
@@ -1,5 +1,9 @@
from os import environ, chdir, getcwd, path as p
import json
+
+import pytest
+
+from rdflib.term import URIRef
from . import runner
@@ -181,31 +185,46 @@ def read_manifest(skiptests):
yield category, testnum, inputpath, expectedpath, context, options
-def test_suite():
+def get_test_suite_cases():
skiptests = unsupported_tests
if SKIP_KNOWN_BUGS:
skiptests += known_bugs
+
+ for cat, num, inputpath, expectedpath, context, options in read_manifest(
+ skiptests
+ ):
+ if options:
+ if (
+ SKIP_1_0_TESTS
+ and "specVersion" in options
+ and str(options["specVersion"]).lower() == "json-ld-1.0"
+ ):
+ # Skip the JSON v1.0 tests
+ continue
+ if inputpath.endswith(".jsonld"): # toRdf
+ if expectedpath.endswith(".jsonld"): # compact/expand/flatten
+ func = runner.do_test_json
+ else: # toRdf
+ func = runner.do_test_parser
+ else: # fromRdf
+ func = runner.do_test_serializer
+ rdf_test_uri = URIRef("{0}{1}-manifest#t{2}".format(
+ TC_BASE, cat, num
+ ))
+ yield rdf_test_uri, func, TC_BASE, cat, num, inputpath, expectedpath, context, options
+
+
+@pytest.fixture(scope="module", autouse=True)
+def global_state():
old_cwd = getcwd()
chdir(test_dir)
- try:
- for cat, num, inputpath, expectedpath, context, options in read_manifest(
- skiptests
- ):
- if options:
- if (
- SKIP_1_0_TESTS
- and "specVersion" in options
- and str(options["specVersion"]).lower() == "json-ld-1.0"
- ):
- # Skip the JSON v1.0 tests
- continue
- if inputpath.endswith(".jsonld"): # toRdf
- if expectedpath.endswith(".jsonld"): # compact/expand/flatten
- func = runner.do_test_json
- else: # toRdf
- func = runner.do_test_parser
- else: # fromRdf
- func = runner.do_test_serializer
- yield func, TC_BASE, cat, num, inputpath, expectedpath, context, options
- finally:
- chdir(old_cwd)
+ yield
+ chdir(old_cwd)
+
+
+@pytest.mark.parametrize(
+ "rdf_test_uri, func, suite_base, cat, num, inputpath, expectedpath, context, options",
+ get_test_suite_cases(),
+)
+def test_suite(rdf_test_uri: URIRef, func, suite_base, cat, num, inputpath, expectedpath, context, options):
+ func(suite_base, cat, num, inputpath, expectedpath, context, options)
diff --git a/test/jsonld/test_testsuite.py b/test/jsonld/test_testsuite.py
index b33176e60..96cf78509 100644
--- a/test/jsonld/test_testsuite.py
+++ b/test/jsonld/test_testsuite.py
@@ -1,7 +1,10 @@
from os import environ, chdir, getcwd, path as p
import json
+
+import pytest
import rdflib
import rdflib.plugins.parsers.jsonld as parser
+from rdflib.term import URIRef
from . import runner
@@ -74,99 +77,44 @@ def read_manifest(skiptests):
yield category, testnum, inputpath, expectedpath, context, options
-def test_suite(skip_known_bugs=True):
- default_allow = rdflib.plugins.parsers.jsonld.ALLOW_LISTS_OF_LISTS
- rdflib.plugins.parsers.jsonld.ALLOW_LISTS_OF_LISTS = allow_lists_of_lists
+def get_test_suite_cases(skip_known_bugs=True):
skiptests = unsupported_tests
if skip_known_bugs:
skiptests += known_bugs
+ for cat, num, inputpath, expectedpath, context, options in read_manifest(
+ skiptests
+ ):
+ if inputpath.endswith(".jsonld"): # toRdf
+ if expectedpath.endswith(".jsonld"): # compact/expand/flatten
+ func = runner.do_test_json
+ else: # toRdf
+ func = runner.do_test_parser
+ else: # fromRdf
+ func = runner.do_test_serializer
+ # func.description = "%s-%s-%s" % (group, case)
+ rdf_test_uri = URIRef("{0}{1}-manifest.jsonld#t{2}".format(
+ TC_BASE, cat, num
+ ))
+ yield rdf_test_uri, func, TC_BASE, cat, num, inputpath, expectedpath, context, options
+
+
+@pytest.fixture(scope="module", autouse=True)
+def global_state():
+ old_version = runner.DEFAULT_PARSER_VERSION
+ runner.DEFAULT_PARSER_VERSION = 1.0
+ default_allow = rdflib.plugins.parsers.jsonld.ALLOW_LISTS_OF_LISTS
+ rdflib.plugins.parsers.jsonld.ALLOW_LISTS_OF_LISTS = allow_lists_of_lists
old_cwd = getcwd()
chdir(test_dir)
- runner.DEFAULT_PARSER_VERSION = 1.0
- try:
- for cat, num, inputpath, expectedpath, context, options in read_manifest(
- skiptests
- ):
- if inputpath.endswith(".jsonld"): # toRdf
- if expectedpath.endswith(".jsonld"): # compact/expand/flatten
- func = runner.do_test_json
- else: # toRdf
- func = runner.do_test_parser
- else: # fromRdf
- func = runner.do_test_serializer
- # func.description = "%s-%s-%s" % (group, case)
- yield func, TC_BASE, cat, num, inputpath, expectedpath, context, options
- finally:
- rdflib.plugins.parsers.jsonld.ALLOW_LISTS_OF_LISTS = default_allow
- chdir(old_cwd)
-
-
-if __name__ == "__main__":
- import sys
- from rdflib import *
- from datetime import datetime
+ yield
+ rdflib.plugins.parsers.jsonld.ALLOW_LISTS_OF_LISTS = default_allow
+ runner.DEFAULT_PARSER_VERSION = old_version
+ chdir(old_cwd)
- EARL = Namespace("http://www.w3.org/ns/earl#")
- DC = Namespace("http://purl.org/dc/terms/")
- FOAF = Namespace("http://xmlns.com/foaf/0.1/")
- DOAP = Namespace("http://usefulinc.com/ns/doap#")
- rdflib_jsonld_page = "https://github.com/RDFLib/rdflib-jsonld"
- rdflib_jsonld = URIRef(rdflib_jsonld_page + "#it")
-
- args = sys.argv[1:]
- asserter = URIRef(args.pop(0)) if args else None
- asserter_name = Literal(args.pop(0)) if args else None
-
- graph = Graph()
-
- graph.parse(
- data="""
- @prefix earl: <{EARL}> .
- @prefix dc: <{DC}> .
- @prefix foaf: <{FOAF}> .
- @prefix doap: <{DOAP}> .
-
- <{rdflib_jsonld}> a doap:Project, earl:TestSubject, earl:Software ;
- doap:homepage <{rdflib_jsonld_page}> ;
- doap:name "RDFLib-JSONLD" ;
- doap:programming-language "Python" ;
- doap:title "RDFLib plugin for JSON-LD " .
- """.format(
- **vars()
- ),
- format="turtle",
- )
-
- if asserter_name:
- graph.add((asserter, RDF.type, FOAF.Person))
- graph.add((asserter, FOAF.name, asserter_name))
- graph.add((rdflib_jsonld, DOAP.developer, asserter))
-
- for args in test_suite(skip_known_bugs=False):
- try:
- args[0](*args[1:])
- success = True
- except AssertionError:
- success = False
- assertion = graph.resource(BNode())
- assertion.add(RDF.type, EARL.Assertion)
- assertion.add(EARL.mode, EARL.automatic)
- if asserter:
- assertion.add(EARL.assertedBy, asserter)
- assertion.add(EARL.subject, rdflib_jsonld)
- assertion.add(
- EARL.test,
- URIRef(
- "http://json-ld.org/test-suite/tests/{1}-manifest.jsonld#t{2}".format(
- *args
- )
- ),
- )
- result = graph.resource(BNode())
- assertion.add(EARL.result, result)
- result.add(RDF.type, EARL.TestResult)
- result.add(DC.date, Literal(datetime.utcnow()))
- result.add(EARL.outcome, EARL.passed if success else EARL.failed)
-
- graph.serialize(destination=sys.stdout)
+@pytest.mark.parametrize(
+ "rdf_test_uri, func, suite_base, cat, num, inputpath, expectedpath, context, options",
+ get_test_suite_cases(),
+)
+def test_suite(rdf_test_uri: URIRef, func, suite_base, cat, num, inputpath, expectedpath, context, options):
+ func(suite_base, cat, num, inputpath, expectedpath, context, options)
diff --git a/test/manifest.py b/test/manifest.py
index 23c66abea..e09257aaf 100644
--- a/test/manifest.py
+++ b/test/manifest.py
@@ -1,22 +1,60 @@
-from collections import namedtuple
-from nose.tools import nottest
+from typing import Iterable, List, NamedTuple, Optional, Tuple
-from rdflib import Graph, RDF, RDFS, Namespace
+from rdflib import RDF, RDFS, Graph, Namespace
+from rdflib.namespace import DefinedNamespace
+from rdflib.term import Node, URIRef
MF = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#")
QT = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-query#")
UP = Namespace("http://www.w3.org/2009/sparql/tests/test-update#")
-RDFT = Namespace("http://www.w3.org/ns/rdftest#")
+
+
+class RDFT(DefinedNamespace):
+ _fail = True
+ _NS = Namespace("http://www.w3.org/ns/rdftest#")
+
+ approval: URIRef # Approval status of a test.
+ Approval: URIRef # The superclass of all test approval statuses.
+ Approved: URIRef # Indicates that a test is approved.
+ Proposed: URIRef # Indicates that a test is proposed, but not approved.
+ Rejected: URIRef # Indicates that a test is not approved.
+ TestEval: URIRef # Superclass of all RDF Evaluation Tests.
+ TestNQuadsNegativeSyntax: URIRef # A negative N-Quads syntax test.
+ TestNQuadsPositiveSyntax: URIRef # A positive N-Quads syntax test.
+ TestNTriplesNegativeSyntax: URIRef # A negative N-Triples syntax test.
+ TestNTriplesPositiveSyntax: URIRef # A positive N-Triples syntax test.
+ TestSyntax: URIRef # Superclass of all RDF Syntax Tests.
+ TestTrigEval: URIRef # A positive TriG evaluation test.
+ TestTrigNegativeEval: URIRef # A negative TriG evaluation test.
+ TestTriGNegativeSyntax: URIRef # A negative TriG syntax test.
+ TestTriGPositiveSyntax: URIRef # A positive TriG syntax test.
+ TestTurtleEval: URIRef # A positive Turtle evaluation test.
+ TestTurtleNegativeEval: URIRef # A negative Turtle evaluation test.
+ TestTurtleNegativeSyntax: URIRef # A negative Turtle syntax test.
+ TestTurtlePositiveSyntax: URIRef # A positive Turtle syntax test.
+ Test: URIRef # Superclass of all RDF Tests.
+ TestXMLNegativeSyntax: URIRef # A negative RDF/XML syntax test.
+ XMLEval: URIRef # A positive RDF/XML evaluation test.
+
+ TestTrigPositiveSyntax: URIRef
+ TestTrigNegativeSyntax: URIRef
+
DAWG = Namespace("http://www.w3.org/2001/sw/DataAccess/tests/test-dawg#")
-RDFTest = namedtuple(
- "RDFTest",
- ["uri", "name", "comment", "data", "graphdata", "action", "result", "syntax"],
-)
+class RDFTest(NamedTuple):
+ uri: URIRef
+ name: str
+ comment: str
+ data: Node
+ graphdata: Optional[List[Node]]
+ action: Node
+ result: Optional[Node]
+ syntax: bool
-def read_manifest(f, base=None, legacy=False):
+
+def read_manifest(f, base=None, legacy=False) -> Iterable[Tuple[URIRef, Node, RDFTest]]:
def _str(x):
if x is not None:
return str(x)
@@ -33,6 +71,7 @@ def _str(x):
yield x
for col in g.objects(m, MF.entries):
+ e: URIRef
for e in g.items(col):
approved = (
@@ -137,7 +176,7 @@ def _str(x):
print("I dont know DAWG Test Type %s" % _type)
continue
- yield _type, RDFTest(
+ yield e, _type, RDFTest(
e,
_str(name),
_str(comment),
@@ -147,10 +186,3 @@ def _str(x):
res,
syntax,
)
-
-
-@nottest
-def nose_tests(testers, manifest, base=None, legacy=False):
- for _type, test in read_manifest(manifest, base, legacy):
- if _type in testers:
- yield testers[_type], test
diff --git a/test/test_canonicalization.py b/test/test_canonicalization.py
index 5649d67fc..87487c014 100644
--- a/test/test_canonicalization.py
+++ b/test/test_canonicalization.py
@@ -1,6 +1,8 @@
from collections import Counter
from typing import Set, Tuple
from unittest.case import expectedFailure
+
+import pytest
from rdflib.term import Node
from rdflib import Graph, RDF, BNode, URIRef, Namespace, ConjunctiveGraph, Literal
from rdflib.namespace import FOAF
@@ -204,6 +206,11 @@ def fn(rdf1, rdf2, identical):
yield fn, inputs[0], inputs[1], inputs[2]
+@pytest.mark.parametrize("fn, rdf1, rdf2, identical", negative_graph_match_test())
+def test_negative_graph_match(fn, rdf1, rdf2, identical):
+ fn(rdf1, rdf2, identical)
+
+
def test_issue494_collapsing_bnodes():
"""Test for https://github.com/RDFLib/rdflib/issues/494 collapsing BNodes"""
g = Graph()
diff --git a/test/test_conjunctive_graph.py b/test/test_conjunctive_graph.py
index ed775c4af..0cbe00771 100644
--- a/test/test_conjunctive_graph.py
+++ b/test/test_conjunctive_graph.py
@@ -2,6 +2,8 @@
Tests for ConjunctiveGraph that do not depend on the underlying store
"""
+import pytest
+
from rdflib import ConjunctiveGraph, Graph
from rdflib.term import Identifier, URIRef, BNode
from rdflib.parser import StringInputSource
@@ -46,7 +48,7 @@ def test_quad_contexts():
assert isinstance(q[3], Graph)
-def test_graph_ids():
+def get_graph_ids_tests():
def check(kws):
cg = ConjunctiveGraph()
cg.parse(**kws)
@@ -62,7 +64,6 @@ def check(kws):
yield check, dict(source=source, format="turtle")
-if __name__ == "__main__":
- import nose
-
- nose.main(defaultTest=__name__)
+@pytest.mark.parametrize("checker, kws", get_graph_ids_tests())
+def test_graph_ids(checker, kws):
+ checker(kws)
diff --git a/test/test_dataset.py b/test/test_dataset.py
index e56e4e290..001fcb74e 100644
--- a/test/test_dataset.py
+++ b/test/test_dataset.py
@@ -4,12 +4,11 @@
from tempfile import mkdtemp, mkstemp
import shutil
+
+import pytest
from rdflib import Dataset, URIRef, plugin
from rdflib.graph import DATASET_DEFAULT_GRAPH_ID
-from nose.exc import SkipTest
-
-
# Will also run SPARQLUpdateStore tests against local SPARQL1.1 endpoint if
# available. This assumes SPARQL1.1 query/update endpoints running locally at
# http://localhost:3030/db/
@@ -35,7 +34,7 @@ def setUp(self):
try:
self.graph = Dataset(store=self.store)
except ImportError:
- raise SkipTest("Dependencies for store '%s' not available!" % self.store)
+ pytest.skip("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
_, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
elif self.store == "SPARQLUpdateStore":
diff --git a/test/test_dawg.py b/test/test_dawg.py
index c8af206d3..b16150917 100644
--- a/test/test_dawg.py
+++ b/test/test_dawg.py
@@ -7,8 +7,8 @@
# http://www.w3.org/2009/sparql/docs/tests/data-sparql11/
# syntax-update-2/manifest#syntax-update-other-01
from test import TEST_DIR
-from test.earl import report, add_test
-from test.manifest import nose_tests, UP, MF
+from test.manifest import UP, MF, RDFTest, read_manifest
+import pytest
sys.setrecursionlimit(6000) # default is 1000
@@ -19,9 +19,11 @@
import datetime
import isodate
import typing
+from typing import Dict, Callable
from rdflib import Dataset, Graph, URIRef, BNode
+from rdflib.term import Node
from rdflib.query import Result
from rdflib.compare import isomorphic
@@ -35,12 +37,10 @@
from urllib.parse import urljoin
from io import BytesIO
-from nose.tools import nottest, eq_
-from nose import SkipTest
-
def eq(a, b, msg):
- return eq_(a, b, msg + ": (%r!=%r)" % (a, b))
+ # return eq_(a, b, msg + ": (%r!=%r)" % (a, b))
+ assert a == b, msg + ": (%r!=%r)" % (a, b)
def setFlags():
@@ -194,7 +194,6 @@ def pp_binding(solutions):
)
-@nottest
def update_test(t):
# the update-eval tests refer to graphs on http://example.org
@@ -203,7 +202,7 @@ def update_test(t):
uri, name, comment, data, graphdata, query, res, syntax = t
if uri in skiptests:
- raise SkipTest()
+ pytest.skip()
try:
g = Dataset()
@@ -325,7 +324,6 @@ def update_test(t):
raise
-@nottest # gets called by generator
def query_test(t):
uri, name, comment, data, graphdata, query, resfile, syntax = t
@@ -333,7 +331,7 @@ def query_test(t):
rdflib_sparql_module.SPARQL_LOAD_GRAPHS = True
if uri in skiptests:
- raise SkipTest()
+ pytest.skip()
def skip(reason="(none)"):
print("Skipping %s from now on." % uri)
@@ -496,12 +494,10 @@ def skip(reason="(none)"):
import pdb
pdb.post_mortem(sys.exc_info()[2])
- # pdb.set_trace()
- # nose.tools.set_trace()
raise
-testers = {
+testers: Dict[Node, Callable[[RDFTest], None]] = {
UP.UpdateEvaluationTest: update_test,
MF.UpdateEvaluationTest: update_test,
MF.PositiveUpdateSyntaxTest11: update_test,
@@ -513,125 +509,31 @@ def skip(reason="(none)"):
}
-def test_dawg():
-
+@pytest.fixture(scope="module", autouse=True)
+def handle_flags():
setFlags()
-
- if SPARQL10Tests:
- for t in nose_tests(testers, "test/DAWG/data-r2/manifest-evaluation.ttl"):
- yield t
-
- if SPARQL11Tests:
- for t in nose_tests(testers, "test/DAWG/data-sparql11/manifest-all.ttl"):
- yield t
-
- if RDFLibTests:
- for t in nose_tests(testers, "test/DAWG/rdflib/manifest.ttl"):
- yield t
-
+ yield
resetFlags()
-if __name__ == "__main__":
-
- import sys
- import time
-
- start = time.time()
- if len(sys.argv) > 1:
- NAME = sys.argv[1]
- DEBUG_FAIL = True
- i = 0
- success = 0
-
- skip = 0
-
- for _type, t in test_dawg():
-
- if NAME and not str(t[0]).startswith(NAME):
- continue
- i += 1
- try:
-
- _type(t)
-
- add_test(t[0], "passed")
- success += 1
-
- except SkipTest as e:
- msg = skiptests.get(t[0], e.args)
- add_test(t[0], "untested", msg)
- print("skipping %s - %s" % (t[0], msg))
- skip += 1
-
- except KeyboardInterrupt:
- raise
- except AssertionError:
- add_test(t[0], "failed")
- except:
- add_test(t[0], "failed", "error")
- import traceback
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test",
+ read_manifest("test/DAWG/data-r2/manifest-evaluation.ttl"),
+)
+def test_dawg_data_sparql10(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
- traceback.print_exc()
- sys.stderr.write("%s\n" % t[0])
- print("\n----------------------------------------------------\n")
- print("Failed tests:")
- for failed in failed_tests:
- print(failed)
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test",
+ read_manifest("test/DAWG/data-sparql11/manifest-all.ttl"),
+)
+def test_dawg_data_sparql11(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
- print("\n----------------------------------------------------\n")
- print("Error tests:")
- for error in error_tests:
- print(error)
- print("\n----------------------------------------------------\n")
-
- print("Most common fails:")
- for failed in fails.most_common(10):
- failed_str = str(failed)
- print(failed_str[:450] + (failed_str[450:] and "..."))
-
- print("\n----------------------------------------------------\n")
-
- if errors:
- print("Most common errors:")
- for error in errors.most_common(10):
- print(error)
- else:
- print("(no errors!)")
-
- f_sum = sum(fails.values())
- e_sum = sum(errors.values())
-
- if success + f_sum + e_sum + skip != i:
- print("(Something is wrong, %d!=%d)" % (success + f_sum + e_sum + skip, i))
-
- print(
- "\n%d tests, %d passed, %d failed, %d errors, \
- %d skipped (%.2f%% success)"
- % (i, success, f_sum, e_sum, skip, 100.0 * success / i)
- )
- print("Took %.2fs" % (time.time() - start))
-
- if not NAME:
-
- now = isodate.datetime_isoformat(datetime.datetime.utcnow())
-
- with open("testruns.txt", "a") as tf:
- tf.write(
- "%s\n%d tests, %d passed, %d failed, %d errors, %d "
- "skipped (%.2f%% success)\n\n"
- % (now, i, success, f_sum, e_sum, skip, 100.0 * success / i)
- )
-
- earl_report = os.path.join(
- TEST_DIR, "../test_reports/rdflib_sparql-%s.ttl" % now.replace(":", "")
- )
-
- report.serialize(earl_report, format="n3")
- report.serialize(
- os.path.join(TEST_DIR, "../test_reports/rdflib_sparql-latest.ttl"),
- format="n3",
- )
- print("Wrote EARL-report to '%s'" % earl_report)
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test", read_manifest("test/DAWG/rdflib/manifest.ttl")
+)
+def test_dawg_rdflib(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
diff --git a/test/test_evaluate_bind.py b/test/test_evaluate_bind.py
index 382b4ed5e..928acbb33 100644
--- a/test/test_evaluate_bind.py
+++ b/test/test_evaluate_bind.py
@@ -2,10 +2,12 @@
Verify evaluation of BIND expressions of different types. See
.
"""
+import pytest
+
from rdflib import Graph, URIRef, Literal, Variable
-def test_bind():
+def get_bind_tests():
base = "http://example.org/"
g = Graph()
g.add((URIRef(base + "thing"), URIRef(base + "ns#comment"), Literal("anything")))
@@ -34,3 +36,7 @@ def check(expr, var, obj):
"type",
URIRef("http://example.org/ns#Thing"),
)
+
+@pytest.mark.parametrize("checker, expr, var, obj", get_bind_tests())
+def test_bind(checker, expr, var, obj) -> None:
+ checker(expr, var, obj)
diff --git a/test/test_expressions.py b/test/test_expressions.py
index 1323e4fc9..f667a3ade 100644
--- a/test/test_expressions.py
+++ b/test/test_expressions.py
@@ -7,7 +7,7 @@
from rdflib import Variable, Literal
-from nose.tools import eq_ as eq
+from .testutils import eq_ as eq
def _eval(e, ctx=None):
@@ -150,10 +150,3 @@ def test_and_or():
bool(_eval(_translate((p.Expression.parseString("(2>1 || 3>2) && 3>4")[0])))),
False,
)
-
-
-if __name__ == "__main__":
- import nose
- import sys
-
- nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_extras_external_graph_libs.py b/test/test_extras_external_graph_libs.py
index 25b692988..6b1c942a7 100644
--- a/test/test_extras_external_graph_libs.py
+++ b/test/test_extras_external_graph_libs.py
@@ -1,12 +1,11 @@
-from nose import SkipTest
from rdflib import Graph, URIRef, Literal
-
+import pytest
def test_rdflib_to_networkx():
try:
import networkx
except ImportError:
- raise SkipTest("couldn't find networkx")
+ pytest.skip("couldn't find networkx")
from rdflib.extras.external_graph_libs import rdflib_to_networkx_multidigraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_digraph
from rdflib.extras.external_graph_libs import rdflib_to_networkx_graph
@@ -55,7 +54,7 @@ def test_rdflib_to_graphtool():
try:
from graph_tool import util as gt_util
except ImportError:
- raise SkipTest("couldn't find graph_tool")
+ pytest.skip("couldn't find graph_tool")
from rdflib.extras.external_graph_libs import rdflib_to_graphtool
g = Graph()
@@ -84,10 +83,3 @@ def test_rdflib_to_graphtool():
epterm = mdg.edge_properties["name"]
assert len(list(gt_util.find_edge(mdg, epterm, str(p)))) == 3
assert len(list(gt_util.find_edge(mdg, epterm, str(q)))) == 1
-
-
-if __name__ == "__main__":
- import sys
- import nose
-
- nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_finalnewline.py b/test/test_finalnewline.py
index e0e790327..6eeb34a77 100644
--- a/test/test_finalnewline.py
+++ b/test/test_finalnewline.py
@@ -26,12 +26,3 @@ def testFinalNewline():
# JSON-LD does not require a final newline (because JSON doesn't)
failed = failed.difference({"json-ld", "application/ld+json"})
assert len(failed) == 0, "No final newline for formats: '%s'" % failed
-
-
-if __name__ == "__main__":
-
- import sys
- import nose
-
- if len(sys.argv) == 1:
- nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_graph.py b/test/test_graph.py
index 8143eff91..499c9cbc6 100644
--- a/test/test_graph.py
+++ b/test/test_graph.py
@@ -6,13 +6,13 @@
import shutil
from urllib.error import URLError, HTTPError
+import pytest
+
from rdflib import URIRef, Graph, plugin
from rdflib.exceptions import ParserError
from rdflib.plugin import PluginException
from rdflib.namespace import Namespace
-from nose.exc import SkipTest
-
from pathlib import Path
from test.testutils import GraphHelper
@@ -26,7 +26,7 @@ def setUp(self):
try:
self.graph = Graph(store=self.store)
except ImportError:
- raise SkipTest("Dependencies for store '%s' not available!" % self.store)
+ pytest.skip("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
_, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
@@ -304,7 +304,7 @@ def testGuessFormatForParse(self):
-
+
"""
self.graph.parse(data=rdf, format="xml")
diff --git a/test/test_graph_context.py b/test/test_graph_context.py
index 52220d2cc..ab4df544f 100644
--- a/test/test_graph_context.py
+++ b/test/test_graph_context.py
@@ -4,10 +4,9 @@
from tempfile import mkdtemp, mkstemp
import shutil
-from rdflib import Graph, ConjunctiveGraph, URIRef, BNode, plugin
-
-from nose.exc import SkipTest
+import pytest
+from rdflib import Graph, ConjunctiveGraph, URIRef, BNode, plugin
class ContextTestCase(unittest.TestCase):
store = "default"
@@ -18,7 +17,7 @@ def setUp(self):
try:
self.graph = ConjunctiveGraph(store=self.store)
except ImportError:
- raise SkipTest("Dependencies for store '%s' not available!" % self.store)
+ pytest.skip("Dependencies for store '%s' not available!" % self.store)
if self.store == "SQLite":
_, self.tmppath = mkstemp(prefix="test", dir="/tmp", suffix=".sqlite")
else:
@@ -99,7 +98,7 @@ def addStuffInMultipleContexts(self):
def testConjunction(self):
if self.store == "SQLite":
- raise SkipTest("Skipping known issue with __len__")
+ pytest.skip("Skipping known issue with __len__")
self.addStuffInMultipleContexts()
triple = (self.pizza, self.likes, self.pizza)
# add to context 1
@@ -132,7 +131,7 @@ def testLenInOneContext(self):
def testLenInMultipleContexts(self):
if self.store == "SQLite":
- raise SkipTest("Skipping known issue with __len__")
+ pytest.skip("Skipping known issue with __len__")
oldLen = len(self.graph)
self.addStuffInMultipleContexts()
diff --git a/test/test_graph_formula.py b/test/test_graph_formula.py
index 14073054a..c8ee9d897 100644
--- a/test/test_graph_formula.py
+++ b/test/test_graph_formula.py
@@ -1,8 +1,8 @@
-from nose.exc import SkipTest
-from nose.tools import nottest
import sys
import os
from tempfile import mkdtemp, mkstemp
+
+import pytest
from rdflib import RDF, RDFS, URIRef, BNode, Variable, plugin
from rdflib.graph import QuotedGraph, ConjunctiveGraph
@@ -20,12 +20,11 @@
# Thorough test suite for formula-aware store
-@nottest # do not run on its own - only as part of generator
-def testFormulaStore(store="default", configString=None):
+def checkFormulaStore(store="default", configString=None):
try:
g = ConjunctiveGraph(store=store)
except ImportError:
- raise SkipTest("Dependencies for store '%s' not available!" % store)
+ pytest.skip("Dependencies for store '%s' not available!" % store)
if configString:
g.destroy(configString)
@@ -126,11 +125,8 @@ def testFormulaStore(store="default", configString=None):
raise
-def testFormulaStores():
+def get_formula_stores_tests():
pluginname = None
- if __name__ == "__main__":
- if len(sys.argv) > 1:
- pluginname = sys.argv[1]
for s in plugin.plugins(pluginname, plugin.Store):
if s.name in (
@@ -142,10 +138,8 @@ def testFormulaStores():
continue
if not s.getClass().formula_aware:
continue
- yield testFormulaStore, s.name
-
-
-if __name__ == "__main__":
- import nose
+ yield checkFormulaStore, s.name
- nose.main(defaultTest=sys.argv[0])
+@pytest.mark.parametrize("checker, name", get_formula_stores_tests())
+def test_formula_stores(checker, name) -> None:
+ checker(name)
diff --git a/test/test_initbindings.py b/test/test_initbindings.py
index 138041b29..bb1fc3234 100644
--- a/test/test_initbindings.py
+++ b/test/test_initbindings.py
@@ -1,5 +1,3 @@
-from nose import SkipTest
-
from rdflib.plugins.sparql import prepareQuery
@@ -345,12 +343,3 @@ def testFilter():
)
)
assert len(results) == 1, results
-
-
-if __name__ == "__main__":
-
- import sys
- import nose
-
- if len(sys.argv) == 1:
- nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_issue190.py b/test/test_issue190.py
index 51191084a..458d23903 100644
--- a/test/test_issue190.py
+++ b/test/test_issue190.py
@@ -1,10 +1,10 @@
# -*- coding: utf-8 -*-
import unittest
-from nose import SkipTest
from rdflib.graph import ConjunctiveGraph
from rdflib.parser import StringInputSource
import textwrap
+import pytest
prefix = textwrap.dedent(
"""\
@@ -42,7 +42,7 @@
Betriebsnummer der Einzugsstelle:\nKnappschaft\n980 0000 6\nWICHTIGES DOKUMENT - SORGFÄLTIG AUFBEWAHREN!\n """
-@unittest.skipIf(True, "Known issue with newlines in text")
+@pytest.mark.xfail(reason="Known issue with newlines in text")
def test1():
meta1 = meta.encode("utf-8") % test_string1.encode("utf-8")
graph = ConjunctiveGraph()
@@ -59,14 +59,10 @@ def test1():
"""
-@unittest.skipIf(True, "Known issue with newlines in text")
+@pytest.mark.xfail(reason="Known issue with newlines in text")
def test2():
meta2 = meta.encode("utf-8") % test_string2.encode("utf-8")
graph = ConjunctiveGraph()
graph.parse(
StringInputSource(prefix + "" + meta2), format="n3"
)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/test/test_issue200.py b/test/test_issue200.py
index 8104747d8..1af0e839a 100644
--- a/test/test_issue200.py
+++ b/test/test_issue200.py
@@ -3,14 +3,16 @@
import os
import rdflib
import unittest
+import pytest
+
try:
from os import fork
from os import pipe
except ImportError:
- from nose import SkipTest
-
- raise SkipTest("No os.fork() and/or os.pipe() on this platform, skipping")
+ pytestmark = pytest.mark.skip(
+ reason="No os.fork() and/or os.pipe() on this platform, skipping"
+ )
class TestRandomSeedInFork(unittest.TestCase):
diff --git a/test/test_issue274.py b/test/test_issue274.py
index d52d80e4a..248b495cb 100644
--- a/test/test_issue274.py
+++ b/test/test_issue274.py
@@ -1,5 +1,4 @@
-from nose.tools import assert_raises
-from nose.tools import eq_
+from .testutils import eq_
from unittest import TestCase
from rdflib import BNode, Graph, Literal, Namespace, RDFS, XSD
@@ -181,14 +180,14 @@ def tearDown(self):
unregister_custom_function(EX.f, self.f)
def test_register_twice_fail(self):
- with assert_raises(ValueError):
+ with self.assertRaises(ValueError):
register_custom_function(EX.f, self.f)
def test_register_override(self):
register_custom_function(EX.f, self.f, override=True)
def test_wrong_unregister_fails(self):
- with assert_raises(ValueError):
+ with self.assertRaises(ValueError):
unregister_custom_function(EX.f, lambda x, y: None)
def test_f(self):
diff --git a/test/test_issue363.py b/test/test_issue363.py
index 5f88a6f40..93236e65b 100644
--- a/test/test_issue363.py
+++ b/test/test_issue363.py
@@ -1,5 +1,5 @@
+import pytest
import rdflib
-from nose.tools import assert_raises
data = """
1:
- check_serialize_parse(sys.argv[1], "n3", "n3", True)
- sys.exit()
- else:
- import nose
-
- nose.main(defaultTest=__name__)
+@pytest.mark.parametrize(
+ "fpath,fmt",
+ _get_test_files_formats(),
+)
+def test_n3_writing(fpath, fmt):
+ check_serialize_parse(fpath, fmt, "n3")
diff --git a/test/test_nquads_w3c.py b/test/test_nquads_w3c.py
index c66c12c56..8738ae7b7 100644
--- a/test/test_nquads_w3c.py
+++ b/test/test_nquads_w3c.py
@@ -1,10 +1,11 @@
"""This runs the nquads tests for the W3C RDF Working Group's N-Quads
test suite."""
+from typing import Callable, Dict
from rdflib import ConjunctiveGraph
-from test.manifest import nose_tests, RDFT
-
-from test.testutils import nose_tst_earl_report
+from rdflib.term import Node, URIRef
+from test.manifest import RDFT, RDFTest, read_manifest
+import pytest
verbose = False
@@ -21,22 +22,15 @@ def nquads(test):
raise
-testers = {RDFT.TestNQuadsPositiveSyntax: nquads, RDFT.TestNQuadsNegativeSyntax: nquads}
-
-
-def test_nquads(tests=None):
- for t in nose_tests(testers, "test/w3c/nquads/manifest.ttl"):
- if tests:
- for test in tests:
- if test in t[1].uri:
- break
- else:
- continue
-
- yield t
-
+testers: Dict[Node, Callable[[RDFTest], None]] = {
+ RDFT.TestNQuadsPositiveSyntax: nquads,
+ RDFT.TestNQuadsNegativeSyntax: nquads,
+}
-if __name__ == "__main__":
- verbose = True
- nose_tst_earl_report(test_nquads, "rdflib_nquads")
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test",
+ read_manifest("test/w3c/nquads/manifest.ttl"),
+)
+def test_manifest(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
diff --git a/test/test_nt_w3c.py b/test/test_nt_w3c.py
index ca9ac1651..3f6894506 100644
--- a/test/test_nt_w3c.py
+++ b/test/test_nt_w3c.py
@@ -1,12 +1,14 @@
"""This runs the nt tests for the W3C RDF Working Group's N-Quads
test suite."""
import os
+from typing import Callable, Dict
from rdflib import Graph
+from rdflib.term import Node, URIRef
from test import TEST_DIR
-from test.manifest import nose_tests, RDFT
+from test.manifest import RDFT, RDFTest, read_manifest
-from test.testutils import nose_tst_earl_report
+import pytest
verbose = False
@@ -23,23 +25,15 @@ def nt(test):
raise
-testers = {RDFT.TestNTriplesPositiveSyntax: nt, RDFT.TestNTriplesNegativeSyntax: nt}
+testers: Dict[Node, Callable[[RDFTest], None]] = {
+ RDFT.TestNTriplesPositiveSyntax: nt,
+ RDFT.TestNTriplesNegativeSyntax: nt,
+}
-def test_nt(tests=None):
- manifest_file = os.path.join(TEST_DIR, "w3c/nt/manifest.ttl")
- for t in nose_tests(testers, manifest_file, legacy=True):
- if tests:
- for test in tests:
- if test in t[1].uri:
- break
- else:
- continue
-
- yield t
-
-
-if __name__ == "__main__":
- verbose = True
-
- nose_tst_earl_report(test_nt, "rdflib_nt")
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test",
+ read_manifest(os.path.join(TEST_DIR, "w3c/nt/manifest.ttl"), legacy=True),
+)
+def test_manifest(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
diff --git a/test/test_roundtrip.py b/test/test_roundtrip.py
index 2928eacdb..f076e576a 100644
--- a/test/test_roundtrip.py
+++ b/test/test_roundtrip.py
@@ -1,4 +1,5 @@
-import sys
+import pytest
+
import rdflib
import rdflib.compare
@@ -87,7 +88,7 @@ def roundtrip(e, verbose=False):
formats = None
-def test_cases():
+def get_cases():
global formats
if not formats:
serializers = set(
@@ -104,7 +105,12 @@ def test_cases():
yield roundtrip, (infmt, testfmt, f)
-def test_n3():
+@pytest.mark.parametrize("checker, args", get_cases())
+def test_cases(checker, args):
+ checker(args)
+
+
+def get_n3_test():
global formats
if not formats:
serializers = set(
@@ -121,15 +127,6 @@ def test_n3():
yield roundtrip, (infmt, testfmt, f)
-if __name__ == "__main__":
- import nose
-
- if len(sys.argv) == 1:
- nose.main(defaultTest=sys.argv[0])
- elif len(sys.argv) == 2:
- import test.test_roundtrip
-
- test.test_roundtrip.formats = [sys.argv[1]]
- nose.main(defaultTest=sys.argv[0], argv=sys.argv[:1])
- else:
- roundtrip((sys.argv[2], sys.argv[1], sys.argv[3]), verbose=True)
+@pytest.mark.parametrize("checker, args", get_n3_test())
+def test_n3(checker, args):
+ checker(args)
diff --git a/test/test_sparql.py b/test/test_sparql.py
index d6e541e16..234403827 100644
--- a/test/test_sparql.py
+++ b/test/test_sparql.py
@@ -4,7 +4,7 @@
from rdflib.compare import isomorphic
from rdflib.term import Variable
-from nose.tools import eq_
+from .testutils import eq_
def test_graph_prefix():
@@ -245,9 +245,3 @@ def test_txtresult():
assert len(lines) == 3
vars_check = [Variable(var.strip()) for var in lines[0].split("|")]
assert vars_check == vars
-
-
-if __name__ == "__main__":
- import nose
-
- nose.main(defaultTest=__name__)
diff --git a/test/test_sparql_agg_undef.py b/test/test_sparql_agg_undef.py
index f36e9eb57..e4e96ea71 100644
--- a/test/test_sparql_agg_undef.py
+++ b/test/test_sparql_agg_undef.py
@@ -1,3 +1,4 @@
+import pytest
from rdflib import Graph, Literal, Variable
query_tpl = """
@@ -23,7 +24,7 @@ def template_tst(agg_func, first, second):
assert results[1][1] == second, (results[1][1], second)
-def test_aggregates():
+def get_aggregates_tests():
yield template_tst, "SUM", Literal(0), Literal(42)
yield template_tst, "MIN", None, Literal(42)
yield template_tst, "MAX", None, Literal(42)
@@ -33,6 +34,11 @@ def test_aggregates():
yield template_tst, "GROUP_CONCAT", Literal(""), Literal("42")
+@pytest.mark.parametrize("checker, agg_func, first, second", get_aggregates_tests())
+def test_aggregates(checker, agg_func, first, second) -> None:
+ checker(agg_func, first, second)
+
+
def test_group_by_null():
g = Graph()
results = list(
diff --git a/test/test_sparql_construct_bindings.py b/test/test_sparql_construct_bindings.py
index 8f8240b2d..7b21b7a44 100644
--- a/test/test_sparql_construct_bindings.py
+++ b/test/test_sparql_construct_bindings.py
@@ -3,8 +3,7 @@
from rdflib.compare import isomorphic
import unittest
-from nose.tools import eq_
-
+from .testutils import eq_
class TestConstructInitBindings(unittest.TestCase):
def test_construct_init_bindings(self):
diff --git a/test/test_sparql_datetime.py b/test/test_sparql_datetime.py
index a771b63ad..481ec3a24 100644
--- a/test/test_sparql_datetime.py
+++ b/test/test_sparql_datetime.py
@@ -2,7 +2,7 @@
from rdflib.plugins.sparql import prepareQuery
from rdflib.compare import isomorphic
import rdflib
-from nose.tools import eq_
+from .testutils import eq_
from pprint import pprint
import io
@@ -23,11 +23,11 @@ def test_dateTime_dateTime_subs_issue():
:C a rdfs:Class.
:start a rdf:Property;
- rdfs:domain :C;
+ rdfs:domain :C;
rdfs:range xsd:dateTime.
:end a rdf:Property;
- rdfs:domain :C;
+ rdfs:domain :C;
rdfs:range xsd:dateTime.
:c1 a :C;
@@ -274,9 +274,3 @@ def test_dateTime_dateTime_subs():
eq_(list(result1)[0][0], expected1)
eq_(list(result1)[1][0], expected2)
-
-
-if __name__ == "__main__":
- import nose
-
- nose.main(defaultTest=__name__)
diff --git a/test/test_sparql_operators.py b/test/test_sparql_operators.py
index b4e4dacdd..2c122eb4b 100644
--- a/test/test_sparql_operators.py
+++ b/test/test_sparql_operators.py
@@ -1,11 +1,9 @@
import datetime
-import nose.tools
-
import rdflib
from rdflib.plugins.sparql import operators
from rdflib.plugins.sparql import sparql
-
+import pytest
def test_date_cast():
now = datetime.datetime.now()
@@ -30,7 +28,7 @@ def test_datetime_cast():
assert result == now
-@nose.tools.raises(sparql.SPARQLError)
def test_datetime_cast_type_error():
literal = rdflib.Literal("2020-01-02")
- operators.date(literal)
+ with pytest.raises(sparql.SPARQLError):
+ operators.date(literal)
diff --git a/test/test_sparql_service.py b/test/test_sparql_service.py
index 7bfe8e4c0..91cbb2d44 100644
--- a/test/test_sparql_service.py
+++ b/test/test_sparql_service.py
@@ -145,8 +145,6 @@ def test_service_with_implicit_select_and_allcaps():
if __name__ == "__main__":
- # import nose
- # nose.main(defaultTest=__name__)
test_service()
test_service_with_bind()
test_service_with_values()
diff --git a/test/test_sparqlupdatestore.py b/test/test_sparqlupdatestore.py
index 75182d494..b41c934a9 100644
--- a/test/test_sparqlupdatestore.py
+++ b/test/test_sparqlupdatestore.py
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
-from nose import SkipTest
import unittest
import re
diff --git a/test/test_store_berkeleydb.py b/test/test_store_berkeleydb.py
index e5e8e66d1..f96fb9bc2 100644
--- a/test/test_store_berkeleydb.py
+++ b/test/test_store_berkeleydb.py
@@ -2,10 +2,13 @@
from tempfile import mktemp
from rdflib import ConjunctiveGraph, URIRef
from rdflib.store import VALID_STORE
+from rdflib.plugins.stores.berkeleydb import has_bsddb
class BerkeleyDBTestCase(unittest.TestCase):
def setUp(self):
+ if not has_bsddb:
+ self.skipTest("skipping as berkeleydb is missing")
self.store_name = "BerkeleyDB"
self.path = mktemp()
self.g = ConjunctiveGraph(store=self.store_name)
@@ -32,7 +35,7 @@ def test_write(self):
), "There must be three triples in the graph after the first data chunk parse"
data2 = """
PREFIX :
-
+
:d :i :j .
"""
self.g.parse(data=data2, format="ttl")
@@ -41,7 +44,7 @@ def test_write(self):
), "There must be four triples in the graph after the second data chunk parse"
data3 = """
PREFIX :
-
+
:d :i :j .
"""
self.g.parse(data=data3, format="ttl")
@@ -61,9 +64,9 @@ def test_read(self):
def test_sparql_query(self):
q = """
PREFIX :
-
+
SELECT (COUNT(*) AS ?c)
- WHERE {
+ WHERE {
:d ?p ?o .
}"""
@@ -75,7 +78,7 @@ def test_sparql_query(self):
def test_sparql_insert(self):
q = """
PREFIX :
-
+
INSERT DATA {
:x :y :z .
}"""
@@ -93,7 +96,7 @@ def test_multigraph(self):
}
GRAPH :n {
:x :y :z .
- }
+ }
}"""
self.g.update(q)
@@ -104,7 +107,7 @@ def test_multigraph(self):
SELECT DISTINCT ?g
WHERE {
GRAPH ?g {
- ?s ?p ?o
+ ?s ?p ?o
}
}
}
diff --git a/test/test_swap_n3.py b/test/test_swap_n3.py
index 2ecafe301..1734806cb 100644
--- a/test/test_swap_n3.py
+++ b/test/test_swap_n3.py
@@ -1,8 +1,9 @@
-from nose.exc import SkipTest
import os
import sys
import unittest
+import pytest
+
maketrans = str.maketrans
import rdflib
@@ -71,7 +72,7 @@ def __repr__(self):
def generictest(e):
"""Documentation"""
if e.skip:
- raise SkipTest("%s skipped, known issue" % e.name)
+ pytest.skip("%s skipped, known issue" % e.name)
g = rdflib.Graph()
for i in [rdf, rdfs, xsd, owl, test, n3test, rdft, triage, mf, qt]:
g.bind(str(i), i)
@@ -95,7 +96,7 @@ def dir_to_uri(directory, sep=os.path.sep):
return "file:///%s" % (path,)
-def test_cases():
+def get_cases():
from copy import deepcopy
g = rdflib.Graph()
@@ -134,6 +135,6 @@ def test_cases():
yield gt, e
-if __name__ == "__main__":
- test_cases()
- # unittest.main()
+@pytest.mark.parametrize("gt, envelope", get_cases())
+def test_cases(gt, envelope):
+ gt(envelope)
diff --git a/test/test_trig.py b/test/test_trig.py
index 9dcd0ecca..b5ba1d666 100644
--- a/test/test_trig.py
+++ b/test/test_trig.py
@@ -2,8 +2,6 @@
import rdflib
import re
-from nose import SkipTest
-
TRIPLE = (
rdflib.URIRef("http://example.com/s"),
rdflib.RDFS.label,
diff --git a/test/test_trig_w3c.py b/test/test_trig_w3c.py
index 179d3680f..576ed6036 100644
--- a/test/test_trig_w3c.py
+++ b/test/test_trig_w3c.py
@@ -2,12 +2,14 @@
"""
+from typing import Callable, Dict
from rdflib import ConjunctiveGraph
from rdflib.namespace import split_uri
from rdflib.compare import graph_diff, isomorphic
+from rdflib.term import Node, URIRef
-from test.manifest import nose_tests, RDFT
-from test.testutils import nose_tst_earl_report
+from test.manifest import RDFT, RDFTest, read_manifest
+import pytest
verbose = False
@@ -59,7 +61,7 @@ def trig(test):
raise
-testers = {
+testers: Dict[Node, Callable[[RDFTest], None]] = {
RDFT.TestTrigPositiveSyntax: trig,
RDFT.TestTrigNegativeSyntax: trig,
RDFT.TestTrigEval: trig,
@@ -67,19 +69,9 @@ def trig(test):
}
-def test_trig(tests=None):
- for t in nose_tests(testers, "test/w3c/trig/manifest.ttl"):
- if tests:
- for test in tests:
- if test in t[1].uri:
- break
- else:
- continue
-
- yield t
-
-
-if __name__ == "__main__":
- verbose = True
-
- nose_tst_earl_report(test_trig, "rdflib_trig")
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test",
+ read_manifest("test/w3c/trig/manifest.ttl"),
+)
+def test_manifest(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
diff --git a/test/test_turtle_serialize.py b/test/test_turtle_serialize.py
index 9e6f0b632..b17492e0e 100644
--- a/test/test_turtle_serialize.py
+++ b/test/test_turtle_serialize.py
@@ -113,10 +113,3 @@ def test_turtle_namespace():
assert "GENO:0000385" in output
assert "SERIAL:0167-6423" in output
assert "EX:name_with_(parenthesis)" in output
-
-
-if __name__ == "__main__":
- import nose
- import sys
-
- nose.main(defaultTest=sys.argv[0])
diff --git a/test/test_turtle_w3c.py b/test/test_turtle_w3c.py
index a1c2aaf7a..b8dde0c0b 100644
--- a/test/test_turtle_w3c.py
+++ b/test/test_turtle_w3c.py
@@ -1,12 +1,14 @@
"""This runs the turtle tests for the W3C RDF Working Group's N-Quads
test suite."""
+from typing import Callable, Dict
from rdflib import Graph
from rdflib.namespace import split_uri
from rdflib.compare import graph_diff, isomorphic
+from rdflib.term import Node, URIRef
-from test.manifest import nose_tests, RDFT
-from test.testutils import nose_tst_earl_report
+from test.manifest import RDFT, RDFTest, read_manifest
+import pytest
verbose = False
@@ -48,7 +50,7 @@ def turtle(test):
raise
-testers = {
+testers: Dict[Node, Callable[[RDFTest], None]] = {
RDFT.TestTurtlePositiveSyntax: turtle,
RDFT.TestTurtleNegativeSyntax: turtle,
RDFT.TestTurtleEval: turtle,
@@ -56,20 +58,9 @@ def turtle(test):
}
-def test_turtle(tests=None):
- for t in nose_tests(testers, "test/w3c/turtle/manifest.ttl"):
- if tests:
- for test in tests:
- if test in t[1].uri:
- break
- else:
- continue
-
- yield t
-
-
-if __name__ == "__main__":
-
- verbose = True
-
- nose_tst_earl_report(test_turtle, "rdflib_turtle")
+@pytest.mark.parametrize(
+ "rdf_test_uri, type, rdf_test",
+ read_manifest("test/w3c/turtle/manifest.ttl"),
+)
+def test_manifest(rdf_test_uri: URIRef, type: Node, rdf_test: RDFTest):
+ testers[type](rdf_test)
diff --git a/test/test_xmlliterals.py b/test/test_xmlliterals.py
index aeabbe888..7ce448737 100644
--- a/test/test_xmlliterals.py
+++ b/test/test_xmlliterals.py
@@ -20,10 +20,12 @@ def testPythonRoundtrip():
assert l1.eq(l4)
rdflib.NORMALIZE_LITERALS = False
- l4 = Literal("hello", datatype=RDF.XMLLiteral)
- assert l1 != l4
- assert l1.eq(l4)
- rdflib.NORMALIZE_LITERALS = True
+ try:
+ l4 = Literal("hello", datatype=RDF.XMLLiteral)
+ assert l1 != l4
+ assert l1.eq(l4)
+ finally:
+ rdflib.NORMALIZE_LITERALS = True
def testRDFXMLParse():
diff --git a/test/testutils.py b/test/testutils.py
index 05ddf9071..8d0a59ab6 100644
--- a/test/testutils.py
+++ b/test/testutils.py
@@ -29,7 +29,6 @@
from http.server import BaseHTTPRequestHandler, HTTPServer, SimpleHTTPRequestHandler
import email.message
from nose import SkipTest
-from .earl import add_test, report
import unittest
from rdflib import BNode, Graph, ConjunctiveGraph
@@ -45,7 +44,6 @@
# TODO: make an introspective version (like this one) of
# rdflib.graphutils.isomorphic and use instead.
from test import TEST_DIR
-from test.earl import add_test, report
def crapCompare(g1, g2):
@@ -160,8 +158,9 @@ def get_random_ip(parts: List[str] = None) -> str:
@contextmanager
-def ctx_http_server(handler: Type[BaseHTTPRequestHandler]) -> Iterator[HTTPServer]:
- host = get_random_ip()
+def ctx_http_server(
+ handler: Type[BaseHTTPRequestHandler], host: str = "127.0.0.1"
+) -> Iterator[HTTPServer]:
server = HTTPServer((host, 0), handler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
@@ -393,9 +392,8 @@ class ServedSimpleHTTPMock(SimpleHTTPMock, AbstractContextManager):
... assert req.path == "/bad/path"
"""
- def __init__(self):
+ def __init__(self, host: str = "127.0.0.1"):
super().__init__()
- host = get_random_ip()
self.server = HTTPServer((host, 0), self.Handler)
self.server_thread = Thread(target=self.server.serve_forever)
self.server_thread.daemon = True
@@ -459,3 +457,15 @@ def test_example(self) -> None:
httpmock.do_get_responses.append(
MockHTTPResponse(200, "OK", b"here it is", {})
)
+
+
+def eq_(lhs, rhs, msg=None):
+ """
+ This function mimics the similar function from nose. Ideally nothing
+ should use it but there is a lot of code that still does and it's fairly
+ simple to just keep this small polyfill here for now.
+ """
+ if msg:
+ assert lhs == rhs, msg
+ else:
+ assert lhs == rhs
diff --git a/tox.ini b/tox.ini
index 01173dcbe..81482217a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,7 +8,7 @@ setenv =
commands =
{envpython} setup.py clean --all
{envpython} setup.py build
- {envpython} run_tests.py
+ {envpython} -m pytest
deps =
-rrequirements.txt
-rrequirements.dev.txt
@@ -17,9 +17,10 @@ deps =
basepython =
python3.7
commands =
- {envpython} run_tests.py --where=./ \
- --with-coverage --cover-html --cover-html-dir=./coverage \
- --cover-package=rdflib --cover-inclusive
+ {envpython} -m pytest \
+ --cov-report term \
+ --cov-report html \
+ --cov
deps =
-rrequirements.txt