Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
0615d71
Unit tests dir structure and GA workflow
uoboda-splunk Jan 29, 2021
d7f6a5a
commenting out test matrix workflow
uoboda-splunk Jan 29, 2021
17607a1
eventtype_parser unit tests (#251)
kkania-splunk Feb 1, 2021
5e851e0
Feature/unit tests/tags parser (#257)
kkania-splunk Feb 4, 2021
e5b5897
Feature/unit tests/infra 30036/props parser (#259)
uoboda-splunk Feb 5, 2021
4c586ad
Feature/unit tests/infra 30109/fields (#260)
uoboda-splunk Feb 9, 2021
da96160
Feature/unit tests/infra 30097/transforms parser (#261)
kkania-splunk Feb 9, 2021
a832557
Feature/unit tests/infra 30039/event ingestors (#265)
kkania-splunk Feb 16, 2021
d3338d5
Feature/unit tests/infra 30041/sample generation (#266)
uoboda-splunk Feb 16, 2021
52eed81
Changing location of test files (#267)
uoboda-splunk Feb 16, 2021
e533633
reload module change
kkania-splunk Feb 17, 2021
4bb8788
test_data_set
kkania-splunk Feb 18, 2021
539eacf
test_data_model
kkania-splunk Feb 18, 2021
e5dc501
test_json_schema.py
kkania-splunk Feb 19, 2021
ad8a82e
test_data_model_handler
kkania-splunk Feb 19, 2021
8160989
test_field_test_adapter
kkania-splunk Feb 22, 2021
5e4ab8c
test_field_test_helper
kkania-splunk Feb 22, 2021
a8c17f2
more tests for test_field_test_helper
kkania-splunk Feb 23, 2021
440d34c
test_test_generator
kkania-splunk Feb 23, 2021
51c3d9e
conftest
kkania-splunk Feb 24, 2021
08c9bbe
more tests for test_generator
kkania-splunk Feb 24, 2021
dec6c73
more tests for test_generator
kkania-splunk Feb 25, 2021
9d3984e
Merge branch 'main' into feature/unit-tests/INFRA-30038/cim_tests
kkania-splunk Feb 26, 2021
f388fae
poetry lock conflicts resolved
kkania-splunk Feb 26, 2021
2269a9b
[CR] review remarks
kkania-splunk Mar 1, 2021
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions tests/unit/pytest.ini
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ addopts = -v --tb=long --log-level=INFO
# --force-flaky --max-runs=3 --min-passes=1
filterwarnings =
ignore::DeprecationWarning

Empty file.
16 changes: 16 additions & 0 deletions tests/unit/tests_standard_lib/test_cim_tests/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
import pytest
from unittest.mock import MagicMock, mock_open


@pytest.fixture()
def open_mock(monkeypatch):
    """Replace ``builtins.open`` with a ``mock_open`` and return it for call inspection."""
    mocked = mock_open()
    monkeypatch.setattr("builtins.open", mocked)
    return mocked


@pytest.fixture()
def json_load_mock(monkeypatch):
    """Stub out ``json.load`` so tests can dictate the parsed-JSON value."""
    mocked = MagicMock()
    monkeypatch.setattr("json.load", mocked)
    return mocked
62 changes: 62 additions & 0 deletions tests/unit/tests_standard_lib/test_cim_tests/test_data_model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
import pytest
from unittest.mock import patch, MagicMock, call
from collections import namedtuple
from pytest_splunk_addon.standard_lib.cim_tests.data_model import DataModel


@pytest.fixture()
def data_set_mock(monkeypatch):
    """Patch the DataSet class referenced by the data_model module.

    The mock returns itself when "instantiated", so class-level and
    instance-level attributes resolve to the same object, and its
    ``load_dataset`` always yields a fixed pair of dataset names.
    """
    mocked = MagicMock()
    mocked.return_value = mocked
    mocked.load_dataset.return_value = ["dataset1", "dataset2"]
    target = "pytest_splunk_addon.standard_lib.cim_tests.data_model.DataSet"
    monkeypatch.setattr(target, mocked)
    return mocked


def test_data_model_instantiation(data_set_mock):
    """A DataModel exposes its name and the datasets loaded for its objects."""
    model = DataModel({"model_name": "test_model_name", "objects": []})
    assert model.name == "test_model_name"
    assert model.root_data_set == ["dataset1", "dataset2"]


def test_data_model_string(data_set_mock):
    """``str(DataModel)`` is the model name."""
    model = DataModel({"model_name": "test_model_name", "objects": []})
    assert str(model) == "test_model_name"
    assert model.name == "test_model_name"


def test_get_mapped_datasets_calls_internal_function():
    """get_mapped_datasets should delegate to _get_mapped_datasets with the root data set."""
    internal_mock = MagicMock(side_effect=lambda tags, datasets: iter(range(3)))
    with patch.object(DataModel, "__init__", return_value=None), patch.object(
        DataModel, "_get_mapped_datasets", internal_mock
    ):
        model = DataModel({})
        model.root_data_set = ["root_data_set"]
        result = list(model.get_mapped_datasets(["addons_tags"]))
    assert result == [0, 1, 2]
    internal_mock.assert_has_calls([call(["addons_tags"], ["root_data_set"])])


def test__get_mapped_datasets():
    """_get_mapped_datasets walks each dataset tree, descending only through matching nodes."""
    node = namedtuple("DataSet", ["name", "match_tags", "child_dataset"])
    leaf_a = node("dataset1a", lambda tags: True, [])
    leaf_c = node("dataset1c", lambda tags: True, [])
    # Non-matching branch: its matching child must be pruned along with it.
    branch_b = node("dataset1b", lambda tags: False, [leaf_c])
    root_one = node("dataset1", lambda tags: True, [leaf_a, branch_b])
    root_two = node("dataset2", lambda tags: True, [])
    with patch.object(DataModel, "__init__", return_value=None):
        model = DataModel({})
        mapped = list(model._get_mapped_datasets([], [root_one, root_two]))
    assert mapped == [
        [root_one],
        [root_one, leaf_a],
        [root_two],
    ]
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
import pytest
from unittest.mock import MagicMock, call, patch, PropertyMock
from collections import namedtuple
from pytest_splunk_addon.standard_lib.cim_tests.data_model_handler import (
DataModelHandler,
)


@pytest.fixture()
def listdir_mock(monkeypatch):
    """Pretend the data-model directory holds two JSON files and one XML file."""
    fake_listing = MagicMock(return_value=["model1.json", "model2.xml", "model3.json"])
    monkeypatch.setattr("os.listdir", fake_listing)


@pytest.fixture()
def data_model_mock(monkeypatch):
    """Patch DataModel so each construction yields a distinct sentinel string."""
    mocked = MagicMock(side_effect=["data_model_instance_1", "data_model_instance_2"])
    target = "pytest_splunk_addon.standard_lib.cim_tests.data_model_handler.DataModel"
    monkeypatch.setattr(target, mocked)
    return mocked


@pytest.fixture()
def json_schema_mock(monkeypatch):
    """Patch JSONSchema so parse_data_model yields a distinct sentinel per call."""
    mocked = MagicMock()
    mocked.parse_data_model.side_effect = [
        "parsed_data_model_1",
        "parsed_data_model_2",
    ]
    target = "pytest_splunk_addon.standard_lib.cim_tests.data_model_handler.JSONSchema"
    monkeypatch.setattr(target, mocked)
    return mocked


def test_data_models():
    """The data_models property materializes and caches the load_data_models generator."""
    generated = (f"data_model_{i}" for i in (1, 2, 3))
    with patch.object(DataModelHandler, "load_data_models", return_value=generated):
        handler = DataModelHandler("/fake_path")
        expected = ["data_model_1", "data_model_2", "data_model_3"]
        assert handler.data_models == expected
        # The cached backing attribute holds the same materialized list.
        assert handler._data_models == expected


def test_load_data_model(listdir_mock, data_model_mock, json_schema_mock):
    """load_data_models parses only the .json files and wraps each result in a DataModel."""
    handler = DataModelHandler("/fake_path")
    instances = list(handler.load_data_models("/fake_path/data_models"))
    assert instances == ["data_model_instance_1", "data_model_instance_2"]
    data_model_mock.assert_has_calls(
        [call("parsed_data_model_1"), call("parsed_data_model_2")]
    )
    # model2.xml must have been skipped entirely.
    json_schema_mock.parse_data_model.assert_has_calls(
        [
            call("/fake_path/data_models/model1.json"),
            call("/fake_path/data_models/model3.json"),
        ]
    )


def test_get_all_tags_per_stanza():
    """_get_all_tags_per_stanza groups the parser's tag dicts by stanza name."""
    parser_cls = namedtuple("AddonParser", ["get_tags"])
    tag_rows = [
        {
            "stanza": "test_stanza_1",
            "tag": {"tag1_key": "tag1_value", "tag2_key": "tag2_value"},
        },
        {"stanza": "test_stanza_2", "tag": {"tag3_key": "tag3_value"}},
        {"stanza": "test_stanza_1", "tag": {"tag4_key": "tag4_value"}},
    ]
    parser = parser_cls(lambda: tag_rows)
    handler = DataModelHandler("/fake_path")
    grouped = handler._get_all_tags_per_stanza(parser)
    # Rows sharing a stanza are collected into one list, in input order.
    assert grouped == {
        "test_stanza_1": [
            {"tag1_key": "tag1_value", "tag2_key": "tag2_value"},
            {"tag4_key": "tag4_value"},
        ],
        "test_stanza_2": [{"tag3_key": "tag3_value"}],
    }


def test_get_mapped_data_models(caplog):
    """get_mapped_data_models yields (stanza, tag_dict) pairs for every stanza that
    at least one data model maps, and logs a warning for unmapped stanzas.
    """
    data_model = namedtuple("DataModel", ["get_mapped_datasets"])
    # Fake models: the first "maps" only single-entry tag lists, the second
    # maps nothing, the third maps only three-entry tag lists (each echoes
    # the tag list back as its mapped datasets).
    data_models = [
        data_model(lambda x: x if len(x) == 1 else []),
        data_model(lambda x: []),
        data_model(lambda x: x if len(x) == 3 else []),
    ]
    with patch.object(
        DataModelHandler,
        "_get_all_tags_per_stanza",
        return_value={
            "stanza_1": [{"tag_key_1a": "tag_value_1a", "tag_key_1b": "tag_value_1b"}],
            "stanza_2": [
                {"tag_key_2a": "tag_value_2a", "tag_key_2b": "tag_value_2b"},
                None,
            ],
            "stanza_3": [
                {"tag_key_3a": "tag_value_3a"},
                {"tag_key_3b": "tag_value_3b"},
                {"tag_key_3c": "tag_value_3c"},
            ],
        },
    ), patch.object(
        DataModelHandler,
        "data_models",
        # data_models is a property on DataModelHandler, so patch it as one.
        new_callable=PropertyMock,
        return_value=data_models,
    ):
        dmh = DataModelHandler("/fake_path")
        # stanza_1 (1 tag dict) is mapped by the first model, stanza_3 (3 tag
        # dicts) by the third; stanza_2 (2 entries) is mapped by none.
        assert list(dmh.get_mapped_data_models("addon_parser")) == [
            (
                "stanza_1",
                {"tag_key_1a": "tag_value_1a", "tag_key_1b": "tag_value_1b"},
            ),
            ("stanza_3", {"tag_key_3a": "tag_value_3a"}),
            ("stanza_3", {"tag_key_3b": "tag_value_3b"}),
            ("stanza_3", {"tag_key_3c": "tag_value_3c"}),
        ]
        assert "No Data Model mapped for stanza_2" in caplog.messages
125 changes: 125 additions & 0 deletions tests/unit/tests_standard_lib/test_cim_tests/test_data_set.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,125 @@
import pytest
from unittest.mock import patch, call
from collections import namedtuple
from pytest_splunk_addon.standard_lib.cim_tests.data_set import DataSet

# Lightweight stand-in used by these tests for DataSet field entries;
# namedtuple equality-by-value makes the expected-list assertions simple.
field = namedtuple("Field", ["name"])


@pytest.fixture()
def mocked_dataset_constructor():
    """Build a DataSet while stubbing every collaborator its __init__ uses.

    Each patched helper returns a recognizable sentinel so instantiation
    tests can assert which helper populated which attribute.
    """
    # Imported here so patch.object targets the very Field object the
    # data_set module resolved at import time.
    from pytest_splunk_addon.standard_lib.cim_tests.data_set import Field

    with patch.object(
        DataSet, "load_dataset", return_value=("child_dataset1", "child_dataset2")
    ), patch.object(
        Field, "parse_fields", return_value=("field1", "field2")
    ), patch.object(
        DataSet,
        "_parse_fields_cluster",
        return_value="parse_field_cluster_return_value",
    ), patch.object(
        # "prase" (sic) — this sentinel string is asserted verbatim by tests.
        DataSet, "_parse_constraint", return_value="prase_constraints_return_value"
    ):
        return DataSet(
            {
                "name": "dataset1",
                "tags": ["test_tag1", "test_tag2"],
                "child_dataset": [],
                "fields": [{"name": "app"}],
                "fields_cluster": [],
                "search_constraints": "tag=alert",
            },
            "test_data_model",
        )


@pytest.fixture()
def dataset_mock():
    """Return a DataSet shell (constructed with __init__ bypassed) preloaded
    with a name, six fields, and two tag groups."""
    with patch.object(DataSet, "__init__", return_value=None):
        shell = DataSet({}, "test_data_model")
    shell.name = "dataset2"
    shell.fields = [
        field(name)
        for name in ("bytes", "bytes_in", "bytes_out", "dest", "dest_ip", "dest_mac")
    ]
    shell.tags = [["tag1", "tag1b"], ["tag2", "tag3"]]
    return shell


def test_dataset_can_be_loaded():
    """load_dataset constructs one DataSet per entry, forwarding the data model name."""
    entries = [
        {"name": "dataset1", "tags": ["tag1"]},
        {"name": "dataset2", "tags": ["tag2"]},
    ]
    with patch.object(DataSet, "__init__", return_value=None) as init_mock:
        # load_dataset is lazy; consume it so the constructor actually runs.
        list(DataSet.load_dataset(entries, "test"))
    init_mock.assert_has_calls([call(entry, "test") for entry in entries])


def test_dataset_instantiation(mocked_dataset_constructor):
    """Every DataSet attribute should carry the sentinel from its patched helper."""
    dataset = mocked_dataset_constructor
    expected_attributes = {
        "name": "dataset1",
        "tags": ["test_tag1", "test_tag2"],
        "data_model": "test_data_model",
        "child_dataset": ["child_dataset1", "child_dataset2"],
        "fields": ["field1", "field2"],
        "fields_cluster": "parse_field_cluster_return_value",
        "search_constraints": "prase_constraints_return_value",
    }
    for attribute, value in expected_attributes.items():
        assert getattr(dataset, attribute) == value


def test_dataset_string(mocked_dataset_constructor):
    """``str(DataSet)`` equals its name."""
    dataset = mocked_dataset_constructor
    assert str(dataset) == "dataset1"
    assert dataset.name == "dataset1"


def test_parse_constraints():
    """_parse_constraint currently passes its argument straight through."""
    constraint = "constraints"
    assert DataSet._parse_constraint(constraint) == constraint


def test_parse_fields_cluster(dataset_mock):
    """Cluster names are resolved to the matching Field objects from self.fields."""
    clusters = [["bytes", "bytes_in", "bytes_out"], ["dest", "dest_ip", "dest_mac"]]
    resolved = dataset_mock._parse_fields_cluster(clusters)
    assert resolved == [
        [field("bytes"), field("bytes_in"), field("bytes_out")],
        [field("dest"), field("dest_ip"), field("dest_mac")],
    ]


def test_parse_fields_raises_error_when_cluster_field_not_in_fields_list(dataset_mock):
    """A cluster naming a field absent from self.fields triggers an AssertionError."""
    bad_cluster = [["bytes", "bytes_in", "bytes_out", "bytes_all"]]
    expected_message = (
        "Dataset=dataset2, Each cluster field should be included in fields list"
    )
    with pytest.raises(AssertionError, match=expected_message):
        dataset_mock._parse_fields_cluster(bad_cluster)


def test_tags_match(dataset_mock):
    """match_tags is True for this tag set.

    NOTE(review): from the fixture's groups [["tag1","tag1b"],["tag2","tag3"]]
    this looks like "some group fully contained in the given tags" — confirm
    against DataSet.match_tags.
    """
    provided_tags = ["tag1", "tag1a", "tag2", "tag3"]
    assert dataset_mock.match_tags(provided_tags) is True


def test_tags_not_match(dataset_mock):
    """When no tag group matches, match_tags yields None (implicit fall-through,
    not an explicit False)."""
    provided_tags = ["tag1", "tag1a", "tag2"]
    assert dataset_mock.match_tags(provided_tags) is None
Loading