
Excluding modin tests

Register a dedicated "modin" pytest marker, split the modin-specific assertions out of the shared pandas/numpy tests into their own marked methods, and deselect them in the partial-tests Core step.

jrobinAV authored 1 year ago
commit c9c94a656b

+ 1 - 1
.github/workflows/partial-tests.yml

@@ -89,7 +89,7 @@ jobs:
 
       - name: Pytest Core
         if: steps.changes.outputs.core == 'true'
-        run: pipenv run pytest -m "not orchestrator_dispatcher" tests/core
+        run: pipenv run pytest -m "not orchestrator_dispatcher and not modin" tests/core
 
       - name: Pytest GUI
         if: steps.changes.outputs.gui == 'true'
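
The marker expression in the Core step now deselects every test carrying either the orchestrator_dispatcher or the modin marker. A minimal sketch of how this deselection behaves, using hypothetical test names that are not part of this repository: running pytest with -m "not orchestrator_dispatcher and not modin" against the module below collects only test_plain.

    import pytest

    @pytest.mark.modin
    def test_with_modin():
        # Deselected by -m "not orchestrator_dispatcher and not modin".
        assert True

    def test_plain():
        # Still collected and run by the Pytest Core step.
        assert True

Conversely, pipenv run pytest -m "modin" tests/core would run only the excluded modin tests on demand.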

+ 1 - 0
pytest.ini

@@ -10,3 +10,4 @@ filterwarnings =
 markers =
     teste2e:End-to-end tests
     orchestrator_dispatcher:Orchestrator dispatcher tests
+    modin:Tests using modin
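
Registering the marker here keeps pytest from emitting an unknown-marker warning for @pytest.mark.modin. If the suite should also degrade gracefully when modin is not installed, a conftest.py hook along the following lines could skip the marked tests automatically; this is a hypothetical sketch, not part of this commit.

    import importlib.util

    import pytest

    def pytest_collection_modifyitems(config, items):
        # Hypothetical helper: skip modin-marked tests when modin is absent.
        if importlib.util.find_spec("modin") is not None:
            return
        skip_modin = pytest.mark.skip(reason="modin is not installed")
        for item in items:
            if "modin" in item.keywords:
                item.add_marker(skip_modin)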

+ 28 - 10
tests/core/data/test_csv_data_node.py

@@ -111,7 +111,7 @@ class TestCSVDataNode:
         dn = CSVDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
         assert os.path.exists(dn.path) is exists
 
-    def test_read_with_header(self):
+    def test_read_with_header_pandas(self):
         not_existing_csv = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "WRONG.csv", "has_header": True})
         with pytest.raises(NoData):
             assert not_existing_csv.read() is None
@@ -125,6 +125,9 @@ class TestCSVDataNode:
         assert len(data_pandas) == 10
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy())
 
+    @pytest.mark.modin
+    def test_read_with_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create CSVDataNode with modin exposed_type
         csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
         data_modin = csv_data_node_as_modin.read()
@@ -132,6 +135,8 @@ class TestCSVDataNode:
         assert len(data_modin) == 10
         assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy())
 
+    def test_read_with_header_numpy(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create CSVDataNode with numpy exposed_type
         csv_data_node_as_numpy = CSVDataNode(
             "bar", Scope.SCENARIO, properties={"path": path, "has_header": True, "exposed_type": "numpy"}
@@ -141,6 +146,12 @@ class TestCSVDataNode:
         assert len(data_numpy) == 10
         assert np.array_equal(data_numpy, pd.read_csv(path).to_numpy())
 
+    def test_read_with_header_custom_exposed_type(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
+        csv_data_node_as_pandas = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path})
+        data_pandas = csv_data_node_as_pandas.read()
+
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create the same CSVDataNode but with custom exposed_type
         csv_data_node_as_custom_object = CSVDataNode(
             "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": MyCustomObject}
@@ -169,15 +180,6 @@ class TestCSVDataNode:
         assert len(data_pandas) == 11
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path, header=None).to_numpy())
 
-        # Create CSVDataNode with modin exposed_type
-        csv_data_node_as_modin = CSVDataNode(
-            "baz", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
-        )
-        data_modin = csv_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 11
-        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy())
-
         # Create CSVDataNode with numpy exposed_type
         csv_data_node_as_numpy = CSVDataNode(
             "qux", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "numpy"}
@@ -201,6 +203,18 @@ class TestCSVDataNode:
             assert str(row_pandas[1]) == row_custom.integer
             assert row_pandas[2] == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_without_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
+        # Create CSVDataNode with modin exposed_type
+        csv_data_node_as_modin = CSVDataNode(
+            "baz", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
+        )
+        data_modin = csv_data_node_as_modin.read()
+        assert isinstance(data_modin, modin_pd.DataFrame)
+        assert len(data_modin) == 11
+        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy())
+
     @pytest.mark.parametrize(
         "content",
         [
@@ -219,6 +233,7 @@ class TestCSVDataNode:
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
         )
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content",
         [
@@ -279,6 +294,7 @@ class TestCSVDataNode:
         with pytest.raises(UnicodeError):
             utf8_dn.read()
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -302,6 +318,7 @@ class TestCSVDataNode:
         csv_dn.write(None)
         assert len(csv_dn.read()) == 0
 
+    @pytest.mark.modin
     def test_write_modin_with_different_encoding(self, csv_file):
         data = pd.DataFrame([{"≥a": 1, "b": 2}])
 
@@ -394,6 +411,7 @@ class TestCSVDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, csv_file):
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
         dn.write(
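
The same refactoring pattern repeats in the data node test modules below: each test that previously exercised pandas, modin, and numpy exposed types in one body is split so that the modin assertions live in a separate method carrying the modin marker, while the pandas and numpy assertions keep running unmarked. Schematically, with hypothetical names condensed from the diffs:

    import pytest

    class TestSomeDataNode:
        def test_read_pandas(self):
            # pandas/numpy assertions stay in unmarked tests and always run.
            ...

        @pytest.mark.modin
        def test_read_modin(self):
            # modin assertions move here and are skipped by -m "not modin".
            ...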

+ 95 - 71
tests/core/data/test_excel_data_node.py

@@ -146,16 +146,6 @@ class TestExcelDataNode:
         assert len(data_pandas) == 5
         assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path).to_numpy())
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": "Sheet1", "exposed_type": "modin"}
-        )
-
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 5
-        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy())
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "numpy", "sheet_name": "Sheet1"}
@@ -191,6 +181,20 @@ class TestExcelDataNode:
             assert row_pandas["integer"] == row_custom.integer
             assert row_pandas["text"] == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_with_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": "Sheet1", "exposed_type": "modin"}
+        )
+
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, modin_pd.DataFrame)
+        assert len(data_modin) == 5
+        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy())
+
     def test_read_without_header(self):
         not_existing_excel = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": "WRONG.xlsx", "has_header": False}
@@ -210,17 +214,6 @@ class TestExcelDataNode:
         assert len(data_pandas) == 6
         assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path, header=None).to_numpy())
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": "Sheet1", "exposed_type": "modin"},
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 6
-        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy())
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar",
@@ -263,6 +256,21 @@ class TestExcelDataNode:
             assert row_pandas[1] == row_custom.integer
             assert row_pandas[2] == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_without_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar",
+            Scope.SCENARIO,
+            properties={"path": path, "has_header": False, "sheet_name": "Sheet1", "exposed_type": "modin"},
+        )
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, modin_pd.DataFrame)
+        assert len(data_modin) == 6
+        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy())
+
+
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -398,6 +406,7 @@ class TestExcelDataNode:
         else:
             assert len(excel_dn.read()) == 1
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -460,32 +469,6 @@ class TestExcelDataNode:
             assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame)
             assert data_pandas[key].equals(data_pandas_no_sheet_name[key])
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": sheet_names, "exposed_type": "modin"}
-        )
-
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, Dict)
-        assert len(data_modin) == 2
-        assert all(
-            len(data_modin[sheet_name] == 5) and isinstance(data_modin[sheet_name], modin_pd.DataFrame)
-            for sheet_name in sheet_names
-        )
-        assert list(data_modin.keys()) == sheet_names
-        for sheet_name in sheet_names:
-            assert data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name))
-
-        excel_data_node_as_pandas_no_sheet_name = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"}
-        )
-
-        data_modin_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read()
-        assert isinstance(data_modin_no_sheet_name, Dict)
-        for key in data_modin_no_sheet_name.keys():
-            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
-            assert data_modin[key].equals(data_modin_no_sheet_name[key])
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar",
@@ -636,6 +619,36 @@ class TestExcelDataNode:
                 assert row_custom_no_sheet_name.integer == row_custom.integer
                 assert row_custom_no_sheet_name.text == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_multi_sheet_with_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        sheet_names = ["Sheet1", "Sheet2"]
+
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": sheet_names, "exposed_type": "modin"}
+        )
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, Dict)
+        assert len(data_modin) == 2
+        assert all(
+            len(data_modin[sheet_name]) == 5 and isinstance(data_modin[sheet_name], modin_pd.DataFrame)
+            for sheet_name in sheet_names
+        )
+        assert list(data_modin.keys()) == sheet_names
+        for sheet_name in sheet_names:
+            assert data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name))
+
+        excel_data_node_as_modin_no_sheet_name = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"}
+        )
+
+        data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read()
+        assert isinstance(data_modin_no_sheet_name, Dict)
+        for key in data_modin_no_sheet_name.keys():
+            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
+            assert data_modin[key].equals(data_modin_no_sheet_name[key])
+
     def test_read_multi_sheet_without_header(self):
         not_existing_excel = ExcelDataNode(
             "foo",
@@ -671,30 +684,6 @@ class TestExcelDataNode:
             assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame)
             assert data_pandas[key].equals(data_pandas_no_sheet_name[key])
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": sheet_names, "exposed_type": "modin"},
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, Dict)
-        assert len(data_modin) == 2
-        assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names)
-        assert list(data_modin.keys()) == sheet_names
-        for sheet_name in sheet_names:
-            assert isinstance(data_modin[sheet_name], modin_pd.DataFrame)
-            assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name))
-
-        excel_data_node_as_modin_no_sheet_name = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
-        )
-        data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read()
-        assert isinstance(data_modin_no_sheet_name, Dict)
-        for key in data_modin_no_sheet_name.keys():
-            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
-            assert data_modin[key].equals(data_modin_no_sheet_name[key])
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar",
@@ -864,6 +853,34 @@ class TestExcelDataNode:
                 assert row_custom_no_sheet_name.integer == row_custom.integer
                 assert row_custom_no_sheet_name.text == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_multi_sheet_without_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        sheet_names = ["Sheet1", "Sheet2"]
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar",
+            Scope.SCENARIO,
+            properties={"path": path, "has_header": False, "sheet_name": sheet_names, "exposed_type": "modin"},
+        )
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, Dict)
+        assert len(data_modin) == 2
+        assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names)
+        assert list(data_modin.keys()) == sheet_names
+        for sheet_name in sheet_names:
+            assert isinstance(data_modin[sheet_name], modin_pd.DataFrame)
+            assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name))
+
+        excel_data_node_as_modin_no_sheet_name = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
+        )
+        data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read()
+        assert isinstance(data_modin_no_sheet_name, Dict)
+        for key in data_modin_no_sheet_name.keys():
+            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
+            assert data_modin[key].equals(data_modin_no_sheet_name[key])
+
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -908,6 +925,7 @@ class TestExcelDataNode:
         read_data = excel_dn.read()
         assert all(np.array_equal(data[sheet_name], read_data[sheet_name]) for sheet_name in sheet_names)
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -1041,6 +1059,7 @@ class TestExcelDataNode:
         )
         assert_frame_equal(dn.read()["Sheet2"], default_multi_sheet_data_frame["Sheet2"])
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content",
         [
@@ -1063,6 +1082,7 @@ class TestExcelDataNode:
             ).reset_index(drop=True),
         )
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content",
         [
@@ -1083,6 +1103,7 @@ class TestExcelDataNode:
             ),
         )
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content",
         [
@@ -1259,6 +1280,7 @@ class TestExcelDataNode:
         assert dn["sheet_1"][:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
         assert dn["sheet_2"][:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 3.0}, {"foo": 1.0, "bar": 4.0}]))
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type_with_sheetname(self, excel_file):
         dn = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "modin"}
@@ -1310,6 +1332,7 @@ class TestExcelDataNode:
         df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
         df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type_without_sheetname(self, excel_file):
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "exposed_type": "modin"})
         dn.write(
@@ -1331,6 +1354,7 @@ class TestExcelDataNode:
         assert dn["Sheet1"]["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
         assert dn["Sheet1"][:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type_multisheet(self, excel_file):
         dn = ExcelDataNode(
             "foo",

+ 31 - 9
tests/core/data/test_parquet_data_node.py

@@ -135,6 +135,7 @@ class TestParquetDataNode:
         dn = ParquetDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
         assert os.path.exists(dn.path) is exists
 
+
     @pytest.mark.parametrize("engine", __engine)
     def test_read_file(self, engine, parquet_file_path):
         not_existing_parquet = ParquetDataNode(
@@ -155,6 +156,19 @@ class TestParquetDataNode:
         assert data_pandas.equals(df)
         assert np.array_equal(data_pandas.to_numpy(), df.to_numpy())
 
+        # Create ParquetDataNode with numpy exposed_type
+        parquet_data_node_as_numpy = ParquetDataNode(
+            "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "numpy", "engine": engine}
+        )
+        data_numpy = parquet_data_node_as_numpy.read()
+        assert isinstance(data_numpy, np.ndarray)
+        assert len(data_numpy) == 2
+        assert np.array_equal(data_numpy, df.to_numpy())
+
+    @pytest.mark.modin
+    @pytest.mark.parametrize("engine", __engine)
+    def test_read_file_modin(self, engine, parquet_file_path):
+        df = pd.read_parquet(parquet_file_path)
         # Create ParquetDataNode with modin exposed_type
         parquet_data_node_as_modin = ParquetDataNode(
             "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin", "engine": engine}
@@ -165,15 +179,6 @@ class TestParquetDataNode:
         assert data_modin.equals(df)
         assert np.array_equal(data_modin.to_numpy(), df.to_numpy())
 
-        # Create ParquetDataNode with numpy exposed_type
-        parquet_data_node_as_numpy = ParquetDataNode(
-            "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "numpy", "engine": engine}
-        )
-        data_numpy = parquet_data_node_as_numpy.read()
-        assert isinstance(data_numpy, np.ndarray)
-        assert len(data_numpy) == 2
-        assert np.array_equal(data_numpy, df.to_numpy())
-
     @pytest.mark.parametrize("engine", __engine)
     def test_read_folder(self, engine):
         parquet_folder_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/parquet_example")
@@ -317,6 +322,7 @@ class TestParquetDataNode:
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
         )
 
+    @pytest.mark.modin
     @pytest.mark.skipif(not util.find_spec("fastparquet"), reason="Append parquet requires fastparquet to be installed")
     @pytest.mark.parametrize(
         "content",
@@ -353,6 +359,21 @@ class TestParquetDataNode:
         assert pathlib.Path(temp_file_path).exists()
         assert isinstance(dn.read(), pd.DataFrame)
 
+    @pytest.mark.modin
+    @pytest.mark.parametrize(
+        "data",
+        [
+            modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
+        ],
+    )
+    def test_write_to_disk_modin(self, tmpdir_factory, data):
+        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
+        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path})
+        dn.write(data)
+
+        assert pathlib.Path(temp_file_path).exists()
+        assert isinstance(dn.read(), pd.DataFrame)
+
     def test_filter_pandas_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "pandas"})
         dn.write(
@@ -402,6 +423,7 @@ class TestParquetDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin"})
         dn.write(

+ 2 - 1
tests/core/data/test_pickle_data_node.py

@@ -124,9 +124,10 @@ class TestPickleDataNodeEntity:
         assert isinstance(pickle_dict.read(), dict)
         assert pickle_dict.read() == {"bar": 12, "baz": "qux", "quux": [13]}
 
+    @pytest.mark.modin
+    def test_read_and_write_modin(self):
         default_pandas = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
         new_pandas_df = pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
-
         default_modin = modin_pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
         new_modin_df = modin_pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
 

+ 2 - 0
tests/core/data/test_sql_data_node.py

@@ -337,6 +337,7 @@ class TestSQLDataNode:
         dn.append(append_data_1)
         assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True))
 
+    @pytest.mark.modin
     def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {
@@ -430,6 +431,7 @@ class TestSQLDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {

+ 2 - 0
tests/core/data/test_sql_table_data_node.py

@@ -464,6 +464,7 @@ class TestSQLTableDataNode:
         dn.append(append_data_1)
         assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True))
 
+    @pytest.mark.modin
     def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {
@@ -539,6 +540,7 @@ class TestSQLTableDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {