
Merge pull request #654 from Avaiga/feature/#631-remove-modin

Feature/#631 remove modin
Jean-Robin 1 year ago
parent
commit
e796fb1849
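This merge removes the modin dependency from taipy: the "modin" exposed type is deprecated and transparently mapped to "pandas", the modin-specific read/write code paths and the dedicated modin CI job are deleted, and pandas becomes a direct dependency of taipy/core. A rough sketch of the resulting user-facing behavior, mirroring the new test_modin_deprecated_in_favor_of_pandas test below (the CSV path and the CSVDataNode import path are assumptions):

    import pandas as pd
    from taipy.config.common.scope import Scope
    from taipy.core.data.csv import CSVDataNode

    # Requesting the deprecated "modin" exposed type now falls back to pandas.
    dn = CSVDataNode("bar", Scope.SCENARIO, properties={"path": "example.csv", "exposed_type": "modin"})
    assert dn.properties["exposed_type"] == "pandas"
    assert isinstance(dn.read(), pd.DataFrame)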

+ 1 - 1
.github/workflows/overall-tests.yml

@@ -41,7 +41,7 @@ jobs:
         run: pipenv run playwright install chromium --with-deps

       - name: Pytest
-        run: pipenv run pytest -m "not orchestrator_dispatcher and not modin and not standalone" --cov=taipy --cov-append --cov-report="xml:overall-coverage.xml" --cov-report term-missing tests
+        run: pipenv run pytest -m "not orchestrator_dispatcher and not standalone" --cov=taipy --cov-append --cov-report="xml:overall-coverage.xml" --cov-report term-missing tests

       - name: Coverage
         if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'

+ 1 - 40
.github/workflows/partial-tests.yml

@@ -90,7 +90,7 @@ jobs:

       - name: Pytest Core
         if: steps.changes.outputs.core == 'true'
-        run: pipenv run pytest -m "not orchestrator_dispatcher and not modin and not standalone" tests/core
+        run: pipenv run pytest -m "not orchestrator_dispatcher and not standalone" tests/core

       - name: Pytest GUI
         if: steps.changes.outputs.gui == 'true'
@@ -189,42 +189,3 @@ jobs:
       - name: Pytest Core standalone
         if: steps.changes.outputs.core == 'true'
         run: pipenv run pytest -m "standalone" tests/core
-
-  modin_tests:
-    needs: linter
-    timeout-minutes: 20
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: ['3.8', '3.9', '3.10', '3.11']
-        os: [ubuntu-latest, windows-latest, macos-latest]
-    runs-on: ${{ matrix.os }}
-    steps:
-      - uses: actions/checkout@v4
-
-      - uses: dorny/paths-filter@v2
-        id: changes
-        with:
-          filters: |
-            core:
-              - 'taipy/core/**'
-
-      - uses: actions/setup-python@v5
-        with:
-          python-version: ${{matrix.python-version}}
-
-      - name: Install pipenv
-        if: steps.changes.outputs.core == 'true'
-        run: curl https://raw.githubusercontent.com/pypa/pipenv/master/get-pipenv.py | python
-
-      - name: Install Dependencies
-        if: steps.changes.outputs.core == 'true'
-        run: pipenv install --dev --python=${{ matrix.python-version }}
-
-      - name: Setup LibMagic (MacOS)
-        if: matrix.os == 'macos-latest' && steps.changes.outputs.core == 'true'
-        run: brew install libmagic
-
-      - name: Pytest Core modin
-        if: steps.changes.outputs.core == 'true'
-        run: pipenv run pytest -m "modin" tests/core

+ 0 - 1
Pipfile

@@ -19,7 +19,6 @@ gitignore-parser = "==0.1.1"
 kthread = "==0.2.3"
 markdown = "==3.4.4"
 marshmallow = "==3.20.1"
-modin = {extras = ["dask"], version = "==0.23.1"}
 networkx = "==2.6"
 openpyxl = "==3.1.2"
 pandas = "==2.0.0"

+ 0 - 1
pytest.ini

@@ -10,5 +10,4 @@ filterwarnings =
 markers =
     teste2e:End-to-end tests
     orchestrator_dispatcher:Orchestrator dispatcher tests
-    modin:Tests using modin
     standalone:Tests starting a standalone dispatcher thread

+ 1 - 1
taipy/core/Pipfile

@@ -4,7 +4,7 @@ verify_ssl = true
 name = "pypi"

 [packages]
-modin = {extras = ["dask"], version = "==0.23.1"}
+pandas = "==2.0.0"
 networkx = "==2.6"
 openpyxl = "==3.1.2"
 pyarrow = "==10.0.1"

+ 1 - 1
taipy/core/config/checkers/_data_node_config_checker.py

@@ -223,5 +223,5 @@ class _DataNodeConfigChecker(_ConfigChecker):
                 data_node_config._EXPOSED_TYPE_KEY,
                 data_node_config.exposed_type,
                 f"The `{data_node_config._EXPOSED_TYPE_KEY}` of DataNodeConfig `{data_node_config_id}` "
-                f'must be either "pandas", "modin", "numpy", or a custom type.',
+                f'must be either "pandas", "numpy", or a custom type.',
             )

+ 11 - 3
taipy/core/config/data_node_config.py

@@ -38,7 +38,7 @@ class DataNodeConfig(Section):
             are : "csv", "excel", "pickle", "sql_table", "sql", "mongo_collection", "generic", "json", "parquet",
             "in_memory and "s3_object".
             The default value is "pickle".
-            Note that the "in_memory" value can only be used when `JobConfig^`.mode is "standalone".
+            Note that the "in_memory" value can only be used when `JobConfig^` mode is "development".
         scope (Optional[Scope^]): The optional `Scope^` of the data nodes instantiated from the data node config.
             The default value is SCENARIO.
         **properties (dict[str, any]): A dictionary of additional properties.
@@ -76,13 +76,12 @@ class DataNodeConfig(Section):

     _EXPOSED_TYPE_KEY = "exposed_type"
     _EXPOSED_TYPE_PANDAS = "pandas"
-    _EXPOSED_TYPE_MODIN = "modin"
+    _EXPOSED_TYPE_MODIN = "modin"  # Deprecated in favor of pandas since 3.1.0
     _EXPOSED_TYPE_NUMPY = "numpy"
     _DEFAULT_EXPOSED_TYPE = _EXPOSED_TYPE_PANDAS

     _ALL_EXPOSED_TYPES = [
         _EXPOSED_TYPE_PANDAS,
-        _EXPOSED_TYPE_MODIN,
         _EXPOSED_TYPE_NUMPY,
     ]

@@ -282,6 +281,15 @@ class DataNodeConfig(Section):
         self._validity_period = validity_period
         super().__init__(id, **properties)

+        # modin exposed type is deprecated since taipy 3.1.0
+        # It is automatically replaced by pandas
+        if "exposed_type" in properties and properties["exposed_type"] == DataNodeConfig._EXPOSED_TYPE_MODIN:
+            _warn_deprecated(
+                "exposed_type='modin'",
+                suggest="exposed_type='pandas'",
+            )
+            properties["exposed_type"] = DataNodeConfig._EXPOSED_TYPE_PANDAS
+
     def __copy__(self):
         return DataNodeConfig(self.id, self._storage_type, self._scope, self._validity_period, **copy(self._properties))


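The deprecation itself is handled in DataNodeConfig.__init__, which rewrites the property and warns through taipy's internal _warn_deprecated helper. A minimal standalone sketch of the same mapping, assuming a plain warnings-based shim rather than that helper:

    import warnings

    def normalize_exposed_type(exposed_type: str) -> str:
        # "modin" is deprecated since taipy 3.1.0 and is replaced by "pandas".
        if exposed_type == "modin":
            warnings.warn(
                "exposed_type='modin' is deprecated; use exposed_type='pandas' instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            return "pandas"
        return exposed_type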
+ 12 - 33
taipy/core/data/_abstract_sql.py

@@ -16,7 +16,6 @@ from abc import abstractmethod
 from datetime import datetime, timedelta
 from typing import Dict, List, Optional, Set, Tuple, Union

-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 from sqlalchemy import create_engine, text
@@ -35,13 +34,6 @@ class _AbstractSQLDataNode(DataNode, _AbstractTabularDataNode):
     """Abstract base class for data node implementations (SQLDataNode and SQLTableDataNode) that use SQL."""

     __STORAGE_TYPE = "NOT_IMPLEMENTED"
-
-    __EXPOSED_TYPE_PROPERTY = "exposed_type"
-    __EXPOSED_TYPE_NUMPY = "numpy"
-    __EXPOSED_TYPE_PANDAS = "pandas"
-    __EXPOSED_TYPE_MODIN = "modin"
-    __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_NUMPY, __EXPOSED_TYPE_MODIN]
-
     __DB_NAME_KEY = "db_name"
     __DB_USERNAME_KEY = "db_username"
     __DB_PASSWORD_KEY = "db_password"
@@ -103,9 +95,12 @@ class _AbstractSQLDataNode(DataNode, _AbstractTabularDataNode):
             properties = {}
         self._check_required_properties(properties)

-        if self.__EXPOSED_TYPE_PROPERTY not in properties.keys():
-            properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS
-        self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES)
+        if self._EXPOSED_TYPE_PROPERTY not in properties.keys():
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        elif properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_MODIN:
+            # Deprecated in favor of pandas since 3.1.0
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        self._check_exposed_type(properties[self._EXPOSED_TYPE_PROPERTY])

         super().__init__(
             config_id,
@@ -138,7 +133,7 @@ class _AbstractSQLDataNode(DataNode, _AbstractTabularDataNode):
                 self.__DB_EXTRA_ARGS_KEY,
                 self.__SQLITE_FOLDER_PATH,
                 self.__SQLITE_FILE_EXTENSION,
-                self.__EXPOSED_TYPE_PROPERTY,
+                self._EXPOSED_TYPE_PROPERTY,
             }
         )

@@ -200,25 +195,21 @@ class _AbstractSQLDataNode(DataNode, _AbstractTabularDataNode):
         raise UnknownDatabaseEngine(f"Unknown engine: {engine}")

     def filter(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND):
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe(operators=operators, join_operator=join_operator)
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN:
-            return self._read_as_modin_dataframe(operators=operators, join_operator=join_operator)
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy(operators=operators, join_operator=join_operator)
         return self._read_as(operators=operators, join_operator=join_operator)

     def _read(self):
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe()
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN:
-            return self._read_as_modin_dataframe()
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy()
         return self._read_as()

     def _read_as(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND):
-        custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY]
+        custom_class = self.properties[self._EXPOSED_TYPE_PROPERTY]
         with self._get_engine().connect() as connection:
             query_result = connection.execute(text(self._get_read_query(operators, join_operator)))
         return [custom_class(**row) for row in query_result]
@@ -239,18 +230,6 @@ class _AbstractSQLDataNode(DataNode, _AbstractTabularDataNode):
                 return pd.DataFrame(conn.execute(text(self._get_read_query(operators, join_operator))))[columns]
             return pd.DataFrame(conn.execute(text(self._get_read_query(operators, join_operator))))

-    def _read_as_modin_dataframe(
-        self,
-        columns: Optional[List[str]] = None,
-        operators: Optional[Union[List, Tuple]] = None,
-        join_operator=JoinOperator.AND,
-    ):
-        if columns:
-            return modin_pd.read_sql_query(self._get_read_query(operators, join_operator), con=self._get_engine())[
-                columns
-            ]
-        return modin_pd.read_sql_query(self._get_read_query(operators, join_operator), con=self._get_engine())
-
     @abstractmethod
     def _get_read_query(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND):
         query = self._get_base_read_query()

+ 8 - 1
taipy/core/data/_abstract_tabular.py

@@ -16,8 +16,15 @@ class _AbstractTabularDataNode(object):
     """Abstract base class for tabular data node implementations (CSVDataNode, ParquetDataNode, ExcelDataNode,
     SQLTableDataNode and SQLDataNode) that are tabular representable."""

+    _EXPOSED_TYPE_PROPERTY = "exposed_type"
+    _EXPOSED_TYPE_NUMPY = "numpy"
+    _EXPOSED_TYPE_PANDAS = "pandas"
+    _EXPOSED_TYPE_MODIN = "modin"  # Deprecated in favor of pandas since 3.1.0
+    __VALID_STRING_EXPOSED_TYPES = [_EXPOSED_TYPE_PANDAS, _EXPOSED_TYPE_NUMPY]
+
     @staticmethod
-    def _check_exposed_type(exposed_type, valid_string_exposed_types):
+    def _check_exposed_type(exposed_type):
+        valid_string_exposed_types = _AbstractTabularDataNode.__VALID_STRING_EXPOSED_TYPES
         if isinstance(exposed_type, str) and exposed_type not in valid_string_exposed_types:
             raise InvalidExposedType(
                 f"Invalid string exposed type {exposed_type}. Supported values are "

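With these constants hoisted onto _AbstractTabularDataNode, every tabular node now validates its exposed type against the same two-entry list instead of keeping a per-class copy. A hedged sketch of the check, using a generic ValueError where taipy raises its own InvalidExposedType:

    VALID_STRING_EXPOSED_TYPES = ["pandas", "numpy"]

    def check_exposed_type(exposed_type) -> None:
        # Non-string exposed types (custom classes) are accepted as-is.
        if isinstance(exposed_type, str) and exposed_type not in VALID_STRING_EXPOSED_TYPES:
            raise ValueError(
                f"Invalid string exposed type {exposed_type}. "
                f"Supported values are {', '.join(VALID_STRING_EXPOSED_TYPES)}."
            )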
+ 1 - 1
taipy/core/data/_data_converter.py

@@ -39,7 +39,7 @@ class _DataNodeConverter(_AbstractConverter):
     # While in practice, each data nodes might have different exposed type possibilities.
     # The previous implementation used tabular datanode but it's no longer suitable so
     # new proposal is needed.
-    _VALID_STRING_EXPOSED_TYPES = ["numpy", "pandas", "modin"]
+    _VALID_STRING_EXPOSED_TYPES = ["numpy", "pandas", "modin"]  # Modin is deprecated in favor of pandas since 3.1.0

     @classmethod
     def __serialize_generic_dn_properties(cls, datanode_properties: dict):

+ 8 - 18
taipy/core/data/_filter.py

@@ -15,7 +15,6 @@ from itertools import chain
 from operator import and_, or_
 from typing import Dict, Iterable, List, Tuple, Union

-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 from pandas.core.common import is_bool_indexer
@@ -26,12 +25,12 @@ from .operator import JoinOperator, Operator
 class _FilterDataNode:
     @staticmethod
     def __is_pandas_object(data) -> bool:
-        return isinstance(data, (pd.DataFrame, modin_pd.DataFrame)) or isinstance(data, (pd.Series, modin_pd.DataFrame))
+        return isinstance(data, pd.DataFrame) or isinstance(data, pd.Series)

     @staticmethod
     def __is_multi_sheet_excel(data) -> bool:
         if isinstance(data, Dict):
-            has_df_children = all(isinstance(e, (pd.DataFrame, modin_pd.DataFrame)) for e in data.values())
+            has_df_children = all(isinstance(e, pd.DataFrame) for e in data.values())
             has_list_children = all(isinstance(e, List) for e in data.values())
             has_np_array_children = all(isinstance(e, np.ndarray) for e in data.values())
             return has_df_children or has_list_children or has_np_array_children
@@ -52,7 +51,7 @@ class _FilterDataNode:
         if isinstance(key, Hashable):
             return _FilterDataNode.__getitem_hashable(data, key)

-        if isinstance(key, (pd.DataFrame, modin_pd.DataFrame)):
+        if isinstance(key, pd.DataFrame):
             return _FilterDataNode.__getitem_dataframe(data, key)

         if is_bool_indexer(key):
@@ -78,7 +77,7 @@ class _FilterDataNode:
         return data[key]

     @staticmethod
-    def __getitem_dataframe(data, key: Union[pd.DataFrame, modin_pd.DataFrame]):
+    def __getitem_dataframe(data, key: pd.DataFrame):
         if _FilterDataNode.__is_pandas_object(data):
             return data[key]
         if _FilterDataNode.__is_list_of_dict(data):
@@ -115,7 +114,7 @@ class _FilterDataNode:
             return {k: _FilterDataNode._filter(v, operators, join_operator) for k, v in data.items()}

         if not ((isinstance(operators[0], list)) or (isinstance(operators[0], tuple))):
-            if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+            if isinstance(data, pd.DataFrame):
                 return _FilterDataNode.__filter_dataframe_per_key_value(data, operators[0], operators[1], operators[2])
             if isinstance(data, np.ndarray):
                 list_operators = [operators]
@@ -123,7 +122,7 @@ class _FilterDataNode:
             if isinstance(data, List):
                 return _FilterDataNode.__filter_list_per_key_value(data, operators[0], operators[1], operators[2])
         else:
-            if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+            if isinstance(data, pd.DataFrame):
                 return _FilterDataNode.__filter_dataframe(data, operators, join_operator=join_operator)
             if isinstance(data, np.ndarray):
                 return _FilterDataNode.__filter_numpy_array(data, operators, join_operator=join_operator)
@@ -133,7 +132,7 @@ class _FilterDataNode:

     @staticmethod
     def __filter_dataframe(
-        df_data: Union[pd.DataFrame, modin_pd.DataFrame], operators: Union[List, Tuple], join_operator=JoinOperator.AND
+        df_data: pd.DataFrame, operators: Union[List, Tuple], join_operator=JoinOperator.AND
     ):
         filtered_df_data = []
         if join_operator == JoinOperator.AND:
@@ -145,16 +144,11 @@ class _FilterDataNode:
         for key, value, operator in operators:
             filtered_df_data.append(_FilterDataNode.__filter_dataframe_per_key_value(df_data, key, value, operator))

-        if isinstance(df_data, modin_pd.DataFrame):
-            if filtered_df_data:
-                return _FilterDataNode.__modin_dataframe_merge(filtered_df_data, how)
-            return modin_pd.DataFrame()
-
         return _FilterDataNode.__dataframe_merge(filtered_df_data, how) if filtered_df_data else pd.DataFrame()

     @staticmethod
     def __filter_dataframe_per_key_value(
-        df_data: Union[pd.DataFrame, modin_pd.DataFrame], key: str, value, operator: Operator
+        df_data: pd.DataFrame, key: str, value, operator: Operator
     ):
         df_by_col = df_data[key]
         if operator == Operator.EQUAL:
@@ -175,10 +169,6 @@ class _FilterDataNode:
     def __dataframe_merge(df_list: List, how="inner"):
         return reduce(lambda df1, df2: pd.merge(df1, df2, how=how), df_list)

-    @staticmethod
-    def __modin_dataframe_merge(df_list: List, how="inner"):
-        return reduce(lambda df1, df2: modin_pd.merge(df1, df2, how=how), df_list)
-
     @staticmethod
     def __filter_numpy_array(data: np.ndarray, operators: Union[List, Tuple], join_operator=JoinOperator.AND):
         conditions = []

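_FilterDataNode now merges per-operator results with plain pandas; the modin merge helper is gone. A small self-contained illustration of the reduce-plus-pd.merge pattern used by __dataframe_merge above (the data values are invented):

    from functools import reduce

    import pandas as pd

    # Each DataFrame stands for the rows matching one filter condition.
    matches_a = pd.DataFrame({"id": [1, 2, 3], "value": [10, 20, 30]})
    matches_b = pd.DataFrame({"id": [2, 3], "value": [20, 30]})

    # JoinOperator.AND maps to how="inner": keep only rows matching every condition.
    result = reduce(lambda df1, df2: pd.merge(df1, df2, how="inner"), [matches_a, matches_b])
    print(result)  # rows with id 2 and 3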
+ 12 - 34
taipy/core/data/csv.py

@@ -15,7 +15,6 @@ from datetime import datetime, timedelta
 from os.path import isfile
 from typing import Any, Dict, List, Optional, Set

-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd

@@ -66,11 +65,6 @@ class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
     """

     __STORAGE_TYPE = "csv"
-    __EXPOSED_TYPE_PROPERTY = "exposed_type"
-    __EXPOSED_TYPE_NUMPY = "numpy"
-    __EXPOSED_TYPE_PANDAS = "pandas"
-    __EXPOSED_TYPE_MODIN = "modin"
-    __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_MODIN, __EXPOSED_TYPE_NUMPY]
     __PATH_KEY = "path"
     __DEFAULT_PATH_KEY = "default_path"
     __ENCODING_KEY = "encoding"
@@ -105,9 +99,12 @@ class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
         if self.__HAS_HEADER_PROPERTY not in properties.keys():
             properties[self.__HAS_HEADER_PROPERTY] = True

-        if self.__EXPOSED_TYPE_PROPERTY not in properties.keys():
-            properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS
-        self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES)
+        if self._EXPOSED_TYPE_PROPERTY not in properties.keys():
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        elif properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_MODIN:
+            # Deprecated in favor of pandas since 3.1.0
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        self._check_exposed_type(properties[self._EXPOSED_TYPE_PROPERTY])

         super().__init__(
             config_id,
@@ -146,7 +143,7 @@ class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):

         self._TAIPY_PROPERTIES.update(
             {
-                self.__EXPOSED_TYPE_PROPERTY,
+                self._EXPOSED_TYPE_PROPERTY,
                 self.__PATH_KEY,
                 self.__DEFAULT_PATH_KEY,
                 self.__ENCODING_KEY,
@@ -172,16 +169,14 @@ class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
         _replace_in_backup_file(old_file_path=tmp_old_path, new_file_path=self._path)

     def _read(self):
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe()
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN:
-            return self._read_as_modin_dataframe()
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy()
         return self._read_as()

     def _read_as(self):
-        custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY]
+        custom_class = self.properties[self._EXPOSED_TYPE_PROPERTY]
         with open(self._path, encoding=self.properties[self.__ENCODING_KEY]) as csvFile:
             res = list()
             if self.properties[self.__HAS_HEADER_PROPERTY]:
@@ -216,25 +211,8 @@ class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
         except pd.errors.EmptyDataError:
             return pd.DataFrame()

-    def _read_as_modin_dataframe(
-        self, usecols: Optional[List[int]] = None, column_names: Optional[List[str]] = None
-    ) -> modin_pd.DataFrame:
-        try:
-            if self.properties[self.__HAS_HEADER_PROPERTY]:
-                if column_names:
-                    return modin_pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])[column_names]
-                return modin_pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])
-            else:
-                if usecols:
-                    return modin_pd.read_csv(
-                        self._path, header=None, usecols=usecols, encoding=self.properties[self.__ENCODING_KEY]
-                    )
-                return modin_pd.read_csv(self._path, header=None, encoding=self.properties[self.__ENCODING_KEY])
-        except pd.errors.EmptyDataError:
-            return modin_pd.DataFrame()
-
     def _append(self, data: Any):
-        if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+        if isinstance(data, pd.DataFrame):
             data.to_csv(self._path, mode="a", index=False, encoding=self.properties[self.__ENCODING_KEY], header=False)
         else:
             pd.DataFrame(data).to_csv(
@@ -242,7 +220,7 @@ class CSVDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
             )

     def _write(self, data: Any):
-        if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+        if isinstance(data, pd.DataFrame):
             data.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY])
         else:
             pd.DataFrame(data).to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY])

+ 27 - 60
taipy/core/data/excel.py

@@ -15,7 +15,6 @@ from datetime import datetime, timedelta
 from os.path import isfile
 from typing import Any, Dict, List, Optional, Set, Tuple, Union

-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 from openpyxl import load_workbook
@@ -71,11 +70,6 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
     """

     __STORAGE_TYPE = "excel"
-    __EXPOSED_TYPE_PROPERTY = "exposed_type"
-    __EXPOSED_TYPE_NUMPY = "numpy"
-    __EXPOSED_TYPE_PANDAS = "pandas"
-    __EXPOSED_TYPE_MODIN = "modin"
-    __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_MODIN, __EXPOSED_TYPE_NUMPY]
     __PATH_KEY = "path"
     __DEFAULT_DATA_KEY = "default_data"
     __DEFAULT_PATH_KEY = "default_path"
@@ -110,9 +104,12 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
             properties[self.__SHEET_NAME_PROPERTY] = None
         if self.__HAS_HEADER_PROPERTY not in properties.keys():
             properties[self.__HAS_HEADER_PROPERTY] = True
-        if self.__EXPOSED_TYPE_PROPERTY not in properties.keys():
-            properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS
-        self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES)
+        if self._EXPOSED_TYPE_PROPERTY not in properties.keys():
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        elif properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_MODIN:
+            # Deprecated in favor of pandas since 3.1.0
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        self._check_exposed_type(properties[self._EXPOSED_TYPE_PROPERTY])

         super().__init__(
             config_id,
@@ -151,7 +148,7 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):

         self._TAIPY_PROPERTIES.update(
             {
-                self.__EXPOSED_TYPE_PROPERTY,
+                self._EXPOSED_TYPE_PROPERTY,
                 self.__PATH_KEY,
                 self.__DEFAULT_PATH_KEY,
                 self.__DEFAULT_DATA_KEY,
@@ -177,22 +174,20 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
         return cls.__STORAGE_TYPE

     @staticmethod
-    def _check_exposed_type(exposed_type, valid_string_exposed_types):
+    def _check_exposed_type(exposed_type):
         if isinstance(exposed_type, str):
-            _AbstractTabularDataNode._check_exposed_type(exposed_type, valid_string_exposed_types)
+            _AbstractTabularDataNode._check_exposed_type(exposed_type)
         elif isinstance(exposed_type, list):
             for t in exposed_type:
-                _AbstractTabularDataNode._check_exposed_type(t, valid_string_exposed_types)
+                _AbstractTabularDataNode._check_exposed_type(t)
         elif isinstance(exposed_type, dict):
             for t in exposed_type.values():
-                _AbstractTabularDataNode._check_exposed_type(t, valid_string_exposed_types)
+                _AbstractTabularDataNode._check_exposed_type(t)

     def _read(self):
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe()
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN:
-            return self._read_as_modin_dataframe()
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy()
         return self._read_as()

@@ -207,7 +202,7 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):

     def _read_as(self):
         excel_file = load_workbook(self._path)
-        exposed_type = self.properties[self.__EXPOSED_TYPE_PROPERTY]
+        exposed_type = self.properties[self._EXPOSED_TYPE_PROPERTY]
         work_books = defaultdict()
         sheet_names = excel_file.sheetnames
         provided_sheet_names = self.__sheet_name_to_list(self.properties)
@@ -217,10 +212,10 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
                 raise NonExistingExcelSheet(sheet_name, self._path)

         if isinstance(exposed_type, List):
-            if len(provided_sheet_names) != len(self.properties[self.__EXPOSED_TYPE_PROPERTY]):
+            if len(provided_sheet_names) != len(self.properties[self._EXPOSED_TYPE_PROPERTY]):
                 raise ExposedTypeLengthMismatch(
                     f"Expected {len(provided_sheet_names)} exposed types, got "
-                    f"{len(self.properties[self.__EXPOSED_TYPE_PROPERTY])}"
+                    f"{len(self.properties[self._EXPOSED_TYPE_PROPERTY])}"
                 )

         for i, sheet_name in enumerate(provided_sheet_names):
@@ -229,14 +224,14 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):

             if not isinstance(sheet_exposed_type, str):
                 if isinstance(exposed_type, dict):
-                    sheet_exposed_type = exposed_type.get(sheet_name, self.__EXPOSED_TYPE_PANDAS)
+                    sheet_exposed_type = exposed_type.get(sheet_name, self._EXPOSED_TYPE_PANDAS)
                 elif isinstance(exposed_type, List):
                     sheet_exposed_type = exposed_type[i]

                 if isinstance(sheet_exposed_type, str):
-                    if sheet_exposed_type == self.__EXPOSED_TYPE_NUMPY:
+                    if sheet_exposed_type == self._EXPOSED_TYPE_NUMPY:
                         work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name).to_numpy()
-                    elif sheet_exposed_type == self.__EXPOSED_TYPE_PANDAS:
+                    elif sheet_exposed_type == self._EXPOSED_TYPE_PANDAS:
                         work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name)
                     continue

@@ -264,21 +259,8 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
             return {sheet_name: df.to_numpy() for sheet_name, df in sheets.items()}
         return sheets.to_numpy()

-    def _do_read_excel(self, engine, sheet_names, kwargs) -> Union[Dict[Union[int, str], pd.DataFrame], pd.DataFrame]:
-        df = pd.read_excel(
-            self._path,
-            sheet_name=sheet_names,
-            **kwargs,
-        )
-        # We are using pandas to load modin dataframes because of a modin issue
-        # https://github.com/modin-project/modin/issues/4924
-        if engine == "modin":
-            if isinstance(df, dict):  # Check if it s a multiple sheet Excel file
-                for key, value in df.items():
-                    df[key] = modin_pd.DataFrame(value)
-                return df
-            return modin_pd.DataFrame(df)
-        return df
+    def _do_read_excel(self, sheet_names, kwargs) -> Union[Dict[Union[int, str], pd.DataFrame], pd.DataFrame]:
+        return pd.read_excel(self._path, sheet_name=sheet_names, **kwargs)

     def __get_sheet_names_and_header(self, sheet_names):
         kwargs: Dict[str, Any] = {}
@@ -291,25 +273,10 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
     def _read_as_pandas_dataframe(self, sheet_names=None) -> Union[Dict[Union[int, str], pd.DataFrame], pd.DataFrame]:
         sheet_names, kwargs = self.__get_sheet_names_and_header(sheet_names)
         try:
-            return self._do_read_excel("pandas", sheet_names, kwargs)
+            return self._do_read_excel(sheet_names, kwargs)
         except pd.errors.EmptyDataError:
             return pd.DataFrame()

-    def _read_as_modin_dataframe(
-        self, sheet_names=None
-    ) -> Union[Dict[Union[int, str], modin_pd.DataFrame], modin_pd.DataFrame]:
-        sheet_names, kwargs = self.__get_sheet_names_and_header(sheet_names)
-        try:
-            if kwargs.get("header", None):
-                return modin_pd.read_excel(
-                    self._path,
-                    sheet_name=sheet_names,
-                    **kwargs,
-                )
-            else:
-                return self._do_read_excel("modin", sheet_names, kwargs)
-        except pd.errors.EmptyDataError:
-            return modin_pd.DataFrame()

     def __append_excel_with_single_sheet(self, append_excel_fct, *args, **kwargs):
         sheet_name = self.properties.get(self.__SHEET_NAME_PROPERTY)
@@ -343,10 +310,10 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):

     def _append(self, data: Any):
         if isinstance(data, Dict) and all(
-            isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()
+            isinstance(x, (pd.DataFrame, np.ndarray)) for x in data.values()
         ):
             self.__append_excel_with_multiple_sheets(data)
-        elif isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+        elif isinstance(data, pd.DataFrame):
             self.__append_excel_with_single_sheet(data.to_excel, index=False, header=False)
         else:
             self.__append_excel_with_single_sheet(pd.DataFrame(data).to_excel, index=False, header=False)
@@ -379,10 +346,10 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):

     def _write(self, data: Any):
         if isinstance(data, Dict) and all(
-            isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()
+            isinstance(x, (pd.DataFrame, np.ndarray)) for x in data.values()
         ):
             self.__write_excel_with_multiple_sheets(data)
-        elif isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+        elif isinstance(data, pd.DataFrame):
             self.__write_excel_with_single_sheet(data.to_excel, self._path, index=False)
         else:
             self.__write_excel_with_single_sheet(pd.DataFrame(data).to_excel, self._path, index=False)
@@ -396,7 +363,7 @@ class ExcelDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode):
             job_id (JobId^): An optional identifier of the writer.
         """
         if isinstance(data, Dict) and all(
-            isinstance(x, (pd.DataFrame, modin_pd.DataFrame, np.ndarray)) for x in data.values()
+            isinstance(x, (pd.DataFrame, np.ndarray)) for x in data.values()
         ):
             self.__write_excel_with_multiple_sheets(data, columns=columns)
         else:

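Excel nodes keep supporting per-sheet exposed types, either a dict keyed by sheet name or a list aligned with sheet_name; "modin" entries are simply no longer valid. A hedged usage sketch (the file name, sheet names, and exact constructor usage are assumptions based on the tests in this commit):

    from taipy.config.common.scope import Scope
    from taipy.core.data.excel import ExcelDataNode

    dn = ExcelDataNode(
        "sales",
        Scope.SCENARIO,
        properties={
            "path": "sales.xlsx",
            "sheet_name": ["2022", "2023"],
            # Per-sheet exposed types; sheets missing from the dict default to "pandas".
            "exposed_type": {"2022": "pandas", "2023": "numpy"},
        },
    )
    data = dn.read()  # {"2022": pd.DataFrame, "2023": np.ndarray}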
+ 11 - 19
taipy/core/data/parquet.py

@@ -14,7 +14,6 @@ from datetime import datetime, timedelta
 from os.path import isdir, isfile
 from typing import Any, Dict, List, Optional, Set

-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd

@@ -77,11 +76,6 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)
     """

     __STORAGE_TYPE = "parquet"
-    __EXPOSED_TYPE_PROPERTY = "exposed_type"
-    __EXPOSED_TYPE_NUMPY = "numpy"
-    __EXPOSED_TYPE_PANDAS = "pandas"
-    __EXPOSED_TYPE_MODIN = "modin"
-    __VALID_STRING_EXPOSED_TYPES = [__EXPOSED_TYPE_PANDAS, __EXPOSED_TYPE_MODIN, __EXPOSED_TYPE_NUMPY]
     __PATH_KEY = "path"
     __DEFAULT_DATA_KEY = "default_data"
     __DEFAULT_PATH_KEY = "default_path"
@@ -141,9 +135,12 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)
         if self.__WRITE_KWARGS_PROPERTY not in properties.keys():
             properties[self.__WRITE_KWARGS_PROPERTY] = {}

-        if self.__EXPOSED_TYPE_PROPERTY not in properties.keys():
-            properties[self.__EXPOSED_TYPE_PROPERTY] = self.__EXPOSED_TYPE_PANDAS
-        self._check_exposed_type(properties[self.__EXPOSED_TYPE_PROPERTY], self.__VALID_STRING_EXPOSED_TYPES)
+        if self._EXPOSED_TYPE_PROPERTY not in properties.keys():
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        elif properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_MODIN:
+            # Deprecated in favor of pandas since 3.1.0
+            properties[self._EXPOSED_TYPE_PROPERTY] = self._EXPOSED_TYPE_PANDAS
+        self._check_exposed_type(properties[self._EXPOSED_TYPE_PROPERTY])

         super().__init__(
             config_id,
@@ -183,7 +180,7 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)

         self._TAIPY_PROPERTIES.update(
             {
-                self.__EXPOSED_TYPE_PROPERTY,
+                self._EXPOSED_TYPE_PROPERTY,
                 self.__PATH_KEY,
                 self.__DEFAULT_PATH_KEY,
                 self.__DEFAULT_DATA_KEY,
@@ -214,7 +211,7 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)
         return self.read_with_kwargs()

     def _read_as(self, read_kwargs: Dict):
-        custom_class = self.properties[self.__EXPOSED_TYPE_PROPERTY]
+        custom_class = self.properties[self._EXPOSED_TYPE_PROPERTY]
         list_of_dicts = self._read_as_pandas_dataframe(read_kwargs).to_dict(orient="records")
         return [custom_class(**dct) for dct in list_of_dicts]

@@ -224,9 +221,6 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)
     def _read_as_pandas_dataframe(self, read_kwargs: Dict) -> pd.DataFrame:
         return pd.read_parquet(self._path, **read_kwargs)

-    def _read_as_modin_dataframe(self, read_kwargs: Dict) -> modin_pd.DataFrame:
-        return modin_pd.read_parquet(self._path, **read_kwargs)
-
     def _append(self, data: Any):
         self.write_with_kwargs(data, engine="fastparquet", append=True)

@@ -250,7 +244,7 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)
         }
         kwargs.update(self.properties[self.__WRITE_KWARGS_PROPERTY])
         kwargs.update(write_kwargs)
-        if isinstance(data, (pd.DataFrame, modin_pd.DataFrame)):
+        if isinstance(data, pd.DataFrame):
             data.to_parquet(self._path, **kwargs)
         else:
             pd.DataFrame(data).to_parquet(self._path, **kwargs)
@@ -280,10 +274,8 @@ class ParquetDataNode(DataNode, _AbstractFileDataNode, _AbstractTabularDataNode)
         )
         kwargs.update(read_kwargs)

-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_PANDAS:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe(kwargs)
             return self._read_as_pandas_dataframe(kwargs)
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_MODIN:
-            return self._read_as_modin_dataframe(kwargs)
-        if self.properties[self.__EXPOSED_TYPE_PROPERTY] == self.__EXPOSED_TYPE_NUMPY:
+        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy(kwargs)
         return self._read_as(kwargs)

+ 0 - 5
taipy/core/data/pickle.py

@@ -14,8 +14,6 @@ import pickle
 from datetime import datetime, timedelta
 from typing import Any, List, Optional, Set

-import modin.pandas as pd
-
 from taipy.config.common.scope import Scope

 from .._backup._backup import _replace_in_backup_file
@@ -153,12 +151,9 @@ class PickleDataNode(DataNode, _AbstractFileDataNode):
         return self._is_generated

     def _read(self):
-        os.environ["MODIN_PERSISTENT_PICKLE"] = "True"
         with open(self._path, "rb") as pf:
             return pickle.load(pf)

     def _write(self, data):
-        if isinstance(data, (pd.DataFrame, pd.Series)):
-            os.environ["MODIN_PERSISTENT_PICKLE"] = "True"
         with open(self._path, "wb") as pf:
             pickle.dump(data, pf)

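PickleDataNode no longer sets MODIN_PERSISTENT_PICKLE or special-cases modin frames; reading and writing are now a plain pickle round-trip, roughly equivalent to this sketch (path handling simplified):

    import pickle

    def write(path: str, data) -> None:
        with open(path, "wb") as pf:
            pickle.dump(data, pf)

    def read(path: str):
        with open(path, "rb") as pf:
            return pickle.load(pf)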
+ 2 - 3
taipy/core/data/sql_table.py

@@ -12,7 +12,6 @@
 from datetime import datetime, timedelta
 from typing import Any, Dict, List, Optional, Set, Tuple, Union

-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 from sqlalchemy import MetaData, Table
@@ -133,7 +132,7 @@ class SQLTableDataNode(_AbstractSQLDataNode):
             delete_table (bool): indicates if the table should be deleted before inserting the data.
         """
         table = self._create_table(engine)
-        if isinstance(data, (modin_pd.DataFrame, pd.DataFrame)):
+        if isinstance(data, pd.DataFrame):
             self.__insert_dataframe(data, table, connection, delete_table)
             return

@@ -172,7 +171,7 @@ class SQLTableDataNode(_AbstractSQLDataNode):

     @classmethod
     def __insert_dataframe(
-        cls, df: Union[modin_pd.DataFrame, pd.DataFrame], table: Any, connection: Any, delete_table: bool
+        cls, df: pd.DataFrame, table: Any, connection: Any, delete_table: bool
     ) -> None:
         cls.__insert_dicts(df.to_dict(orient="records"), table, connection, delete_table)


+ 1 - 1
taipy/core/setup.py

@@ -30,7 +30,7 @@ requirements = [
     "pyarrow>=10.0.1,<11.0",
     "networkx>=2.6,<3.0",
     "openpyxl>=3.1.2,<3.2",
-    "modin[dask]>=0.23.1,<1.0",
+    "pandas>=2.0.0,<3.0",
     "pymongo[srv]>=4.2.0,<5.0",
     "sqlalchemy>=2.0.16,<2.1",
     "toml>=0.10,<0.11",

+ 5 - 4
tests/core/config/checkers/test_data_node_config_checker.py

@@ -664,7 +664,7 @@ class TestDataNodeConfigChecker:
             Config.check()
         assert len(Config._collector.errors) == 1
         expected_error_message = (
-            'The `exposed_type` of DataNodeConfig `default` must be either "pandas", "modin"'
+            'The `exposed_type` of DataNodeConfig `default` must be either "pandas"'
             ', "numpy", or a custom type. Current value of property `exposed_type` is "foo".'
         )
         assert expected_error_message in caplog.text
@@ -675,9 +675,10 @@ class TestDataNodeConfigChecker:
         assert len(Config._collector.errors) == 0

         config._sections[DataNodeConfig.name]["default"].properties = {"exposed_type": "modin"}
-        Config._collector = IssueCollector()
-        Config.check()
-        assert len(Config._collector.errors) == 0
+        with pytest.raises(SystemExit):
+            Config._collector = IssueCollector()
+            Config.check()
+        assert len(Config._collector.errors) == 1
 
         config._sections[DataNodeConfig.name]["default"].properties = {"exposed_type": "numpy"}
         Config._collector = IssueCollector()

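The checker test above captures the behaviour change: "modin" used to pass as a valid exposed_type, and Config.check() now records an error and exits instead. A hedged sketch of what a user configuration can expect (configure_data_node arguments are illustrative):

import pytest

from taipy.config import Config

def test_modin_exposed_type_now_fails_check():
    # "modin" is no longer a valid exposed_type, so Config.check()
    # collects one error and raises SystemExit, as the updated test asserts.
    Config.configure_data_node("my_dn", storage_type="csv", path="data.csv", exposed_type="modin")
    with pytest.raises(SystemExit):
        Config.check()
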
+ 8 - 139
tests/core/data/test_csv_data_node.py

@@ -14,11 +14,9 @@ import pathlib
 from datetime import datetime
 from time import sleep
 
-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 import pytest
-from modin.pandas.test.utils import df_equals
 from pandas.testing import assert_frame_equal
 
 from taipy.config.common.scope import Scope
@@ -71,6 +69,14 @@ class TestCSVDataNode:
                 "foo bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "name": "super name"}
             )
 
+    def test_modin_deprecated_in_favor_of_pandas(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
+        # Create CSVDataNode with modin exposed_type
+        csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
+        assert csv_data_node_as_modin.properties["exposed_type"] == "pandas"
+        data_modin = csv_data_node_as_modin.read()
+        assert isinstance(data_modin, pd.DataFrame)
+
     def test_get_user_properties(self, csv_file):
         dn_1 = CSVDataNode("dn_1", Scope.SCENARIO, properties={"path": "data/node/path"})
         assert dn_1._get_user_properties() == {}
@@ -125,16 +131,6 @@ class TestCSVDataNode:
         assert len(data_pandas) == 10
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy())
 
-    @pytest.mark.modin
-    def test_read_with_header_modin(self):
-        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
-        # Create CSVDataNode with modin exposed_type
-        csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
-        data_modin = csv_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 10
-        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy())
-
     def test_read_with_header_numpy(self):
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create CSVDataNode with numpy exposed_type
@@ -203,18 +199,6 @@ class TestCSVDataNode:
             assert str(row_pandas[1]) == row_custom.integer
             assert row_pandas[2] == row_custom.text
 
-    @pytest.mark.modin
-    def test_read_without_header_modin(self):
-        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
-        # Create CSVDataNode with modin exposed_type
-        csv_data_node_as_modin = CSVDataNode(
-            "baz", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
-        )
-        data_modin = csv_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 11
-        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy())
-
     @pytest.mark.parametrize(
         "content",
         [
@@ -233,26 +217,6 @@ class TestCSVDataNode:
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
         )
 
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-            (pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])),
-            ([[11, 22, 33], [44, 55, 66]]),
-        ],
-    )
-    def test_append_modin(self, csv_file, default_data_frame, content):
-        csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
-        df_equals(csv_dn.read(), modin_pd.DataFrame(default_data_frame))
-
-        csv_dn.append(content)
-        df_equals(
-            csv_dn.read(),
-            modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(
-                drop=True
-            ),
-        )
 
     @pytest.mark.parametrize(
         "content,columns",
@@ -294,51 +258,6 @@ class TestCSVDataNode:
         with pytest.raises(UnicodeError):
             utf8_dn.read()
 
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content,columns",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}], None),
-            ([[11, 22, 33], [44, 55, 66]], None),
-            ([[11, 22, 33], [44, 55, 66]], ["e", "f", "g"]),
-        ],
-    )
-    def test_write_modin(self, csv_file, default_data_frame, content, columns):
-        default_data_frame = modin_pd.DataFrame(default_data_frame)
-        csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
-        assert np.array_equal(csv_dn.read().values, default_data_frame.values)
-        if not columns:
-            csv_dn.write(content)
-            df = pd.DataFrame(content)
-        else:
-            csv_dn.write_with_column_names(content, columns)
-            df = pd.DataFrame(content, columns=columns)
-        assert np.array_equal(csv_dn.read().values, df.values)
-
-        csv_dn.write(None)
-        assert len(csv_dn.read()) == 0
-
-    @pytest.mark.modin
-    def test_write_modin_with_different_encoding(self, csv_file):
-        data = pd.DataFrame([{"≥a": 1, "b": 2}])
-
-        utf8_dn = CSVDataNode("utf8_dn", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
-        utf16_dn = CSVDataNode(
-            "utf16_dn", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin", "encoding": "utf-16"}
-        )
-
-        # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding
-        utf8_dn.write(data)
-        assert np.array_equal(utf8_dn.read(), data)
-        with pytest.raises(UnicodeError):
-            utf16_dn.read()
-
-        # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding
-        utf16_dn.write(data)
-        assert np.array_equal(utf16_dn.read(), data)
-        with pytest.raises(UnicodeError):
-            utf8_dn.read()
-
     def test_set_path(self):
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.csv"})
         assert dn.path == "foo.csv"
@@ -411,56 +330,6 @@ class TestCSVDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type(self, csv_file):
-        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
-        dn.write(
-            [
-                {"foo": 1, "bar": 1},
-                {"foo": 1, "bar": 2},
-                {"foo": 1},
-                {"foo": 2, "bar": 2},
-                {"bar": 2},
-            ]
-        )
-
-        # Test datanode indexing and slicing
-        assert dn["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
-        assert dn["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
-        assert dn[:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
-
-        # Test filter data
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["foo"] == 1]
-        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}, {"foo": 1.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
-        filtered_by_indexing = dn[dn["foo"] != 1]
-        expected_data = modin_pd.DataFrame([{"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("bar", 2, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["bar"] == 2]
-        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 2.0}, {"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)
-        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
-        expected_data = modin_pd.DataFrame(
-            [
-                {"foo": 1.0, "bar": 1.0},
-                {"foo": 1.0, "bar": 2.0},
-                {"foo": 2.0, "bar": 2.0},
-                {"bar": 2.0},
-            ]
-        )
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
     def test_filter_numpy_exposed_type(self, csv_file):
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "numpy"})
         dn.write(

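The new CSV test pins the compatibility behaviour: a node built with exposed_type "modin" is silently rewritten to pandas. Sketched usage against that contract (import paths assumed from the test module; the CSV path is illustrative):

import pandas as pd

from taipy.config.common.scope import Scope
from taipy.core.data.csv import CSVDataNode

# "modin" is still accepted for backward compatibility, but the node
# downgrades itself to the pandas exposed_type.
dn = CSVDataNode("bar", Scope.SCENARIO, properties={"path": "example.csv", "exposed_type": "modin"})
assert dn.properties["exposed_type"] == "pandas"
assert isinstance(dn.read(), pd.DataFrame)  # assuming example.csv exists
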
+ 9 - 369
tests/core/data/test_excel_data_node.py

@@ -15,11 +15,9 @@ from datetime import datetime
 from time import sleep
 from typing import Dict
 
-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 import pytest
-from modin.pandas.test.utils import df_equals
 from pandas.testing import assert_frame_equal
 
 from taipy.config.common.scope import Scope
@@ -120,6 +118,15 @@ class TestExcelDataNode:
         # exposed_type, default_data, default_path, path, has_header are filtered out
         assert dn_2._get_user_properties() == {"foo": "bar"}
 
+    def test_modin_deprecated_in_favor_of_pandas(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        # Create ExcelDataNode with modin exposed_type
+        props = {"path": path, "sheet_name": "Sheet1", "exposed_type": "modin"}
+        modin_dn = ExcelDataNode("bar", Scope.SCENARIO, properties=props)
+        assert modin_dn.properties["exposed_type"] == "pandas"
+        data_modin = modin_dn.read()
+        assert isinstance(data_modin, pd.DataFrame)
+
     def test_read_with_header(self):
         with pytest.raises(NoData):
             not_existing_excel = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": "WRONG.xlsx"})
@@ -181,20 +188,6 @@ class TestExcelDataNode:
             assert row_pandas["integer"] == row_custom.integer
             assert row_pandas["text"] == row_custom.text
 
-    @pytest.mark.modin
-    def test_read_with_header_modin(self):
-        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
-
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": "Sheet1", "exposed_type": "modin"}
-        )
-
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 5
-        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy())
-
     def test_read_without_header(self):
         not_existing_excel = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": "WRONG.xlsx", "has_header": False}
@@ -256,20 +249,6 @@ class TestExcelDataNode:
             assert row_pandas[1] == row_custom.integer
             assert row_pandas[2] == row_custom.text
 
-    @pytest.mark.modin
-    def test_read_without_header_modin(self):
-        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": "Sheet1", "exposed_type": "modin"},
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 6
-        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy())
-
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -405,32 +384,6 @@ class TestExcelDataNode:
         else:
             assert len(excel_dn.read()) == 1
 
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content,columns",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}], None),
-            ([[11, 22, 33], [44, 55, 66]], None),
-            ([[11, 22, 33], [44, 55, 66]], ["e", "f", "g"]),
-        ],
-    )
-    def test_write_modin(self, excel_file, default_data_frame, content, columns):
-        excel_dn = ExcelDataNode(
-            "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "modin"}
-        )
-        assert np.array_equal(excel_dn.read().values, default_data_frame.values)
-        if not columns:
-            excel_dn.write(content)
-            df = modin_pd.DataFrame(content)
-        else:
-            excel_dn.write_with_column_names(content, columns)
-            df = modin_pd.DataFrame(content, columns=columns)
-
-        assert np.array_equal(excel_dn.read().values, df.values)
-
-        excel_dn.write(None)
-        assert len(excel_dn.read()) == 0
-
     def test_read_multi_sheet_with_header(self):
         not_existing_excel = ExcelDataNode(
             "foo",
@@ -617,37 +570,6 @@ class TestExcelDataNode:
                 assert row_custom_no_sheet_name.id == row_custom.id
                 assert row_custom_no_sheet_name.integer == row_custom.integer
                 assert row_custom_no_sheet_name.text == row_custom.text
-
-    @pytest.mark.modin
-    def test_read_multi_sheet_with_header_modin(self):
-        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
-        sheet_names = ["Sheet1", "Sheet2"]
-
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": sheet_names, "exposed_type": "modin"}
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, Dict)
-        assert len(data_modin) == 2
-        assert all(
-            len(data_modin[sheet_name] == 5) and isinstance(data_modin[sheet_name], modin_pd.DataFrame)
-            for sheet_name in sheet_names
-        )
-        assert list(data_modin.keys()) == sheet_names
-        for sheet_name in sheet_names:
-            assert data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name))
-
-        excel_data_node_as_pandas_no_sheet_name = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"}
-        )
-
-        data_modin_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read()
-        assert isinstance(data_modin_no_sheet_name, Dict)
-        for key in data_modin_no_sheet_name.keys():
-            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
-            assert data_modin[key].equals(data_modin_no_sheet_name[key])
-
     def test_read_multi_sheet_without_header(self):
         not_existing_excel = ExcelDataNode(
             "foo",
@@ -852,34 +774,6 @@ class TestExcelDataNode:
                 assert row_custom_no_sheet_name.integer == row_custom.integer
                 assert row_custom_no_sheet_name.text == row_custom.text
 
-    @pytest.mark.modin
-    def test_read_multi_sheet_without_header_modin(self):
-        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
-        sheet_names = ["Sheet1", "Sheet2"]
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": sheet_names, "exposed_type": "modin"},
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, Dict)
-        assert len(data_modin) == 2
-        assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names)
-        assert list(data_modin.keys()) == sheet_names
-        for sheet_name in sheet_names:
-            assert isinstance(data_modin[sheet_name], modin_pd.DataFrame)
-            assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name))
-
-        excel_data_node_as_modin_no_sheet_name = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
-        )
-        data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read()
-        assert isinstance(data_modin_no_sheet_name, Dict)
-        for key in data_modin_no_sheet_name.keys():
-            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
-            assert data_modin[key].equals(data_modin_no_sheet_name[key])
-
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -924,36 +818,6 @@ class TestExcelDataNode:
         read_data = excel_dn.read()
         assert all(np.array_equal(data[sheet_name], read_data[sheet_name]) for sheet_name in sheet_names)
 
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content,columns",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}], None),
-            ([[11, 22, 33], [44, 55, 66]], None),
-            ([[11, 22, 33], [44, 55, 66]], ["e", "f", "g"]),
-        ],
-    )
-    def test_write_multi_sheet_with_modin(
-        self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content, columns
-    ):
-        sheet_names = ["Sheet1", "Sheet2"]
-
-        excel_dn = ExcelDataNode(
-            "foo",
-            Scope.SCENARIO,
-            properties={"path": excel_file_with_multi_sheet, "sheet_name": sheet_names, "exposed_type": "modin"},
-        )
-
-        for sheet_name in sheet_names:
-            assert np.array_equal(excel_dn.read()[sheet_name].values, default_multi_sheet_data_frame[sheet_name].values)
-
-        multi_sheet_content = {sheet_name: modin_pd.DataFrame(content) for sheet_name in sheet_names}
-
-        excel_dn.write(multi_sheet_content)
-
-        for sheet_name in sheet_names:
-            assert np.array_equal(excel_dn.read()[sheet_name].values, multi_sheet_content[sheet_name].values)
-
     @pytest.mark.parametrize(
         "content",
         [
@@ -1058,103 +922,6 @@ class TestExcelDataNode:
         )
         assert_frame_equal(dn.read()["Sheet2"], default_multi_sheet_data_frame["Sheet2"])
 
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-            (modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])),
-            ([[11, 22, 33], [44, 55, 66]]),
-        ],
-    )
-    def test_append_modin_with_sheetname(self, excel_file, default_data_frame, content):
-        dn = ExcelDataNode(
-            "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "modin"}
-        )
-        df_equals(dn.read(), modin_pd.DataFrame(default_data_frame))
-
-        dn.append(content)
-        df_equals(
-            dn.read(),
-            modin_pd.concat(
-                [modin_pd.DataFrame(default_data_frame), modin_pd.DataFrame(content, columns=["a", "b", "c"])]
-            ).reset_index(drop=True),
-        )
-
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-            (modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])),
-            ([[11, 22, 33], [44, 55, 66]]),
-        ],
-    )
-    def test_append_modin_without_sheetname(self, excel_file, default_data_frame, content):
-        dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "exposed_type": "modin"})
-        df_equals(dn.read()["Sheet1"], default_data_frame)
-
-        dn.append(content)
-        df_equals(
-            dn.read()["Sheet1"],
-            modin_pd.concat([default_data_frame, modin_pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(
-                drop=True
-            ),
-        )
-
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "content",
-        [
-            (
-                {
-                    "Sheet1": modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}]),
-                    "Sheet2": modin_pd.DataFrame([{"a": 44, "b": 55, "c": 66}]),
-                }
-            ),
-            (
-                {
-                    "Sheet1": modin_pd.DataFrame({"a": [11, 44], "b": [22, 55], "c": [33, 66]}),
-                    "Sheet2": modin_pd.DataFrame([{"a": 77, "b": 88, "c": 99}]),
-                }
-            ),
-            ({"Sheet1": np.array([[11, 22, 33], [44, 55, 66]]), "Sheet2": np.array([[77, 88, 99]])}),
-        ],
-    )
-    def test_append_modin_multisheet(self, excel_file_with_multi_sheet, default_multi_sheet_data_frame, content):
-        dn = ExcelDataNode(
-            "foo",
-            Scope.SCENARIO,
-            properties={
-                "path": excel_file_with_multi_sheet,
-                "sheet_name": ["Sheet1", "Sheet2"],
-                "exposed_type": "modin",
-            },
-        )
-        df_equals(dn.read()["Sheet1"], default_multi_sheet_data_frame["Sheet1"])
-        df_equals(dn.read()["Sheet2"], default_multi_sheet_data_frame["Sheet2"])
-
-        dn.append(content)
-
-        df_equals(
-            dn.read()["Sheet1"],
-            modin_pd.concat(
-                [
-                    default_multi_sheet_data_frame["Sheet1"],
-                    modin_pd.DataFrame(content["Sheet1"], columns=["a", "b", "c"]),
-                ]
-            ).reset_index(drop=True),
-        )
-        df_equals(
-            dn.read()["Sheet2"],
-            modin_pd.concat(
-                [
-                    default_multi_sheet_data_frame["Sheet2"],
-                    modin_pd.DataFrame(content["Sheet2"], columns=["a", "b", "c"]),
-                ]
-            ).reset_index(drop=True),
-        )
-
     def test_filter_pandas_exposed_type_with_sheetname(self, excel_file):
         dn = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "pandas"}
@@ -1279,133 +1046,6 @@ class TestExcelDataNode:
         assert dn["sheet_1"][:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
         assert dn["sheet_2"][:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 3.0}, {"foo": 1.0, "bar": 4.0}]))
 
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type_with_sheetname(self, excel_file):
-        dn = ExcelDataNode(
-            "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "modin"}
-        )
-        dn.write(
-            [
-                {"foo": 1, "bar": 1},
-                {"foo": 1, "bar": 2},
-                {"foo": 1},
-                {"foo": 2, "bar": 2},
-                {"bar": 2},
-            ]
-        )
-
-        # Test datanode indexing and slicing
-        assert dn["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
-        assert dn["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
-        assert dn[:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
-
-        # Test filter data
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["foo"] == 1]
-        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}, {"foo": 1.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
-        filtered_by_indexing = dn[dn["foo"] != 1]
-        expected_data = modin_pd.DataFrame([{"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("bar", 2, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["bar"] == 2]
-        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 2.0}, {"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)
-        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
-        expected_data = modin_pd.DataFrame(
-            [
-                {"foo": 1.0, "bar": 1.0},
-                {"foo": 1.0, "bar": 2.0},
-                {"foo": 2.0, "bar": 2.0},
-                {"bar": 2.0},
-            ]
-        )
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type_without_sheetname(self, excel_file):
-        dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "exposed_type": "modin"})
-        dn.write(
-            [
-                {"foo": 1, "bar": 1},
-                {"foo": 1, "bar": 2},
-                {"foo": 1},
-                {"foo": 2, "bar": 2},
-                {"bar": 2},
-            ]
-        )
-
-        assert len(dn.filter(("foo", 1, Operator.EQUAL))["Sheet1"]) == 3
-        assert len(dn.filter(("foo", 1, Operator.NOT_EQUAL))["Sheet1"]) == 2
-        assert len(dn.filter(("bar", 2, Operator.EQUAL))["Sheet1"]) == 3
-        assert len(dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)["Sheet1"]) == 4
-
-        assert dn["Sheet1"]["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
-        assert dn["Sheet1"]["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
-        assert dn["Sheet1"][:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
-
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type_multisheet(self, excel_file):
-        dn = ExcelDataNode(
-            "foo",
-            Scope.SCENARIO,
-            properties={"path": excel_file, "sheet_name": ["sheet_1", "sheet_2"], "exposed_type": "modin"},
-        )
-        dn.write(
-            {
-                "sheet_1": pd.DataFrame(
-                    [
-                        {"foo": 1, "bar": 1},
-                        {"foo": 1, "bar": 2},
-                        {"foo": 1},
-                        {"foo": 2, "bar": 2},
-                        {"bar": 2},
-                    ]
-                ),
-                "sheet_2": pd.DataFrame(
-                    [
-                        {"foo": 1, "bar": 3},
-                        {"foo": 1, "bar": 4},
-                        {"foo": 1},
-                        {"foo": 2, "bar": 4},
-                        {"bar": 4},
-                    ]
-                ),
-            }
-        )
-
-        assert len(dn.filter(("foo", 1, Operator.EQUAL))) == 2
-        assert len(dn.filter(("foo", 1, Operator.EQUAL))["sheet_1"]) == 3
-        assert len(dn.filter(("foo", 1, Operator.EQUAL))["sheet_2"]) == 3
-
-        assert len(dn.filter(("foo", 1, Operator.NOT_EQUAL))) == 2
-        assert len(dn.filter(("foo", 1, Operator.NOT_EQUAL))["sheet_1"]) == 2
-        assert len(dn.filter(("foo", 1, Operator.NOT_EQUAL))["sheet_2"]) == 2
-
-        assert len(dn.filter(("bar", 2, Operator.EQUAL))) == 2
-        assert len(dn.filter(("bar", 2, Operator.EQUAL))["sheet_1"]) == 3
-        assert len(dn.filter(("bar", 2, Operator.EQUAL))["sheet_2"]) == 0
-
-        assert len(dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)) == 2
-        assert len(dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)["sheet_1"]) == 4
-        assert len(dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)["sheet_2"]) == 0
-
-        assert dn["sheet_1"]["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
-        assert dn["sheet_2"]["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
-        assert dn["sheet_1"]["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
-        assert dn["sheet_2"]["bar"].equals(modin_pd.Series([3, 4, None, 4, 4]))
-        assert dn["sheet_1"][:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
-        assert dn["sheet_2"][:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 3.0}, {"foo": 1.0, "bar": 4.0}]))
-
     def test_filter_numpy_exposed_type_with_sheetname(self, excel_file):
         dn = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "numpy"}

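The deleted multi-sheet modin tests have pandas twins earlier in the file; the shape they assert on comes straight from pandas. A plain-pandas sketch of that shape (workbook name illustrative):

import pandas as pd

# sheet_name=None asks pandas for every sheet, returned as a dict keyed
# by sheet name -- the structure the multi-sheet tests assert on.
sheets = pd.read_excel("example.xlsx", sheet_name=None)
assert isinstance(sheets, dict)
assert all(isinstance(frame, pd.DataFrame) for frame in sheets.values())
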
+ 9 - 117
tests/core/data/test_parquet_data_node.py

@@ -15,11 +15,9 @@ from datetime import datetime
 from importlib import util
 from time import sleep
 
-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 import pytest
-from modin.pandas.test.utils import df_equals
 from pandas.testing import assert_frame_equal
 
 from taipy.config.common.scope import Scope
@@ -136,6 +134,15 @@ class TestParquetDataNode:
         dn = ParquetDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
         assert os.path.exists(dn.path) is exists
 
+    @pytest.mark.parametrize("engine", __engine)
+    def test_modin_deprecated_in_favor_of_pandas(self, engine, parquet_file_path):
+        # Create ParquetDataNode with modin exposed_type
+        props = {"path": parquet_file_path, "exposed_type": "modin", "engine": engine}
+        parquet_data_node_as_modin = ParquetDataNode("bar", Scope.SCENARIO, properties=props)
+        assert parquet_data_node_as_modin.properties["exposed_type"] == "pandas"
+        data_modin = parquet_data_node_as_modin.read()
+        assert isinstance(data_modin, pd.DataFrame)
+
     @pytest.mark.parametrize("engine", __engine)
     def test_read_file(self, engine, parquet_file_path):
         not_existing_parquet = ParquetDataNode(
@@ -165,20 +172,6 @@ class TestParquetDataNode:
         assert len(data_numpy) == 2
         assert np.array_equal(data_numpy, df.to_numpy())
 
-    @pytest.mark.modin
-    @pytest.mark.parametrize("engine", __engine)
-    def test_read_file_modin(self, engine, parquet_file_path):
-        df = pd.read_parquet(parquet_file_path)
-        # Create ParquetDataNode with modin exposed_type
-        parquet_data_node_as_modin = ParquetDataNode(
-            "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin", "engine": engine}
-        )
-        data_modin = parquet_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 2
-        assert data_modin.equals(df)
-        assert np.array_equal(data_modin.to_numpy(), df.to_numpy())
-
     @pytest.mark.parametrize("engine", __engine)
     def test_read_folder(self, engine):
         parquet_folder_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/parquet_example")
@@ -322,27 +315,6 @@ class TestParquetDataNode:
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
         )
 
-    @pytest.mark.modin
-    @pytest.mark.skipif(not util.find_spec("fastparquet"), reason="Append parquet requires fastparquet to be installed")
-    @pytest.mark.parametrize(
-        "content",
-        [
-            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-            (pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])),
-        ],
-    )
-    def test_append_modin(self, parquet_file_path, default_data_frame, content):
-        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin"})
-        df_equals(dn.read(), modin_pd.DataFrame(default_data_frame))
-
-        dn.append(content)
-        df_equals(
-            dn.read(),
-            modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(
-                drop=True
-            ),
-        )
-
     @pytest.mark.parametrize(
         "data",
         [
@@ -358,36 +330,6 @@ class TestParquetDataNode:
         assert pathlib.Path(temp_file_path).exists()
         assert isinstance(dn.read(), pd.DataFrame)
 
-        @pytest.mark.modin
-        @pytest.mark.parametrize(
-            "data",
-            [
-                modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-            ],
-        )
-        def test_write_to_disk_modin(self, tmpdir_factory, data):
-            temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
-            dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path})
-            dn.write(data)
-
-            assert pathlib.Path(temp_file_path).exists()
-            assert isinstance(dn.read(), pd.DataFrame)
-
-    @pytest.mark.modin
-    @pytest.mark.parametrize(
-        "data",
-        [
-            modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-        ],
-    )
-    def test_write_to_disk_modin(self, tmpdir_factory, data):
-        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
-        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path})
-        dn.write(data)
-
-        assert pathlib.Path(temp_file_path).exists()
-        assert isinstance(dn.read(), pd.DataFrame)
-
     def test_filter_pandas_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "pandas"})
         dn.write(
@@ -437,56 +379,6 @@ class TestParquetDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type(self, parquet_file_path):
-        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin"})
-        dn.write(
-            [
-                {"foo": 1, "bar": 1},
-                {"foo": 1, "bar": 2},
-                {"foo": 1},
-                {"foo": 2, "bar": 2},
-                {"bar": 2},
-            ]
-        )
-
-        # Test datanode indexing and slicing
-        assert dn["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
-        assert dn["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
-        assert dn[:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
-
-        # Test filter data
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["foo"] == 1]
-        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}, {"foo": 1.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
-        filtered_by_indexing = dn[dn["foo"] != 1]
-        expected_data = modin_pd.DataFrame([{"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("bar", 2, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["bar"] == 2]
-        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 2.0}, {"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)
-        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
-        expected_data = modin_pd.DataFrame(
-            [
-                {"foo": 1.0, "bar": 1.0},
-                {"foo": 1.0, "bar": 2.0},
-                {"foo": 2.0, "bar": 2.0},
-                {"bar": 2.0},
-            ]
-        )
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
     def test_filter_numpy_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "numpy"})
         dn.write(

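The parquet tests keep their engine parametrization, and the removed append test's skipif still documents the constraint: reads and writes work with either engine, but in-place append relies on fastparquet. A plain-pandas sketch (file name illustrative):

import pandas as pd

df = pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])

# Either engine handles a straight write/read round-trip.
df.to_parquet("example.parquet", engine="pyarrow")
assert pd.read_parquet("example.parquet", engine="pyarrow").equals(df)

# Appending in place is a fastparquet-only feature (hence the skipif above):
# df.to_parquet("example.parquet", engine="fastparquet", append=True)
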
+ 0 - 34
tests/core/data/test_pickle_data_node.py

@@ -14,7 +14,6 @@ import pathlib
 from datetime import datetime
 from time import sleep
 
-import modin.pandas as modin_pd
 import pandas as pd
 import pytest
 
@@ -123,39 +122,6 @@ class TestPickleDataNodeEntity:
         assert isinstance(pickle_dict.read(), dict)
         assert pickle_dict.read() == {"bar": 12, "baz": "qux", "quux": [13]}
 
-    @pytest.mark.modin
-    def test_read_and_write_modin(self):
-        default_pandas = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
-        new_pandas_df = pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
-        default_modin = modin_pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
-        new_modin_df = modin_pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
-
-        pickle_pandas = PickleDataNode("foo", Scope.SCENARIO, properties={"default_data": default_pandas})
-        assert isinstance(pickle_pandas.read(), pd.DataFrame)
-        assert default_pandas.equals(pickle_pandas.read())
-        pickle_pandas.write(new_pandas_df)
-        assert new_pandas_df.equals(pickle_pandas.read())
-        assert isinstance(pickle_pandas.read(), pd.DataFrame)
-        pickle_pandas.write(new_modin_df)
-        assert new_modin_df.equals(pickle_pandas.read())
-        assert isinstance(pickle_pandas.read(), modin_pd.DataFrame)
-        pickle_pandas.write(1998)
-        assert pickle_pandas.read() == 1998
-        assert isinstance(pickle_pandas.read(), int)
-
-        pickle_modin = PickleDataNode("foo", Scope.SCENARIO, properties={"default_data": default_modin})
-        assert isinstance(pickle_modin.read(), modin_pd.DataFrame)
-        assert default_modin.equals(pickle_modin.read())
-        pickle_modin.write(new_modin_df)
-        assert new_modin_df.equals(pickle_modin.read())
-        assert isinstance(pickle_modin.read(), modin_pd.DataFrame)
-        pickle_modin.write(new_pandas_df)
-        assert new_pandas_df.equals(pickle_modin.read())
-        assert isinstance(pickle_modin.read(), pd.DataFrame)
-        pickle_modin.write(1998)
-        assert pickle_modin.read() == 1998
-        assert isinstance(pickle_modin.read(), int)
-
     def test_path_overrides_default_path(self):
         dn = PickleDataNode(
             "foo",

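With the modin round-trip test gone, the pickle node's remaining contract is type-faithful serialization: whatever Python object is written comes back unchanged. A tiny sketch of that property:

import pickle

def roundtrip(value):
    # Serialize to bytes and back; the pickle data node leans on the
    # same guarantee for DataFrames, dicts, and scalars alike.
    return pickle.loads(pickle.dumps(value))

assert roundtrip({"bar": 12, "baz": "qux", "quux": [13]}) == {"bar": 12, "baz": "qux", "quux": [13]}
assert roundtrip(1998) == 1998
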
+ 6 - 194
tests/core/data/test_sql_data_node.py

@@ -12,11 +12,9 @@
 from importlib import util
 from unittest.mock import patch
 
-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 import pytest
-from modin.pandas.test.utils import df_equals
 from pandas.testing import assert_frame_equal
 
 from taipy.config.common.scope import Scope
@@ -38,22 +36,11 @@ def my_write_query_builder_with_pandas(data: pd.DataFrame):
     insert_data = data.to_dict("records")
     return ["DELETE FROM example", ("INSERT INTO example VALUES (:foo, :bar)", insert_data)]
 
-
-def my_write_query_builder_with_modin(data: modin_pd.DataFrame):
-    insert_data = data.to_dict("records")
-    return ["DELETE FROM example", ("INSERT INTO example VALUES (:foo, :bar)", insert_data)]
-
-
 def my_append_query_builder_with_pandas(data: pd.DataFrame):
     insert_data = data.to_dict("records")
     return [("INSERT INTO example VALUES (:foo, :bar)", insert_data)]
 
 
-def my_append_query_builder_with_modin(data: modin_pd.DataFrame):
-    insert_data = data.to_dict("records")
-    return [("INSERT INTO example VALUES (:foo, :bar)", insert_data)]
-
-
 def single_write_query_builder(data):
     return "DELETE FROM example"
 
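Only the pandas query builders survive, and they define the shape a write_query_builder must return: a list whose items are either bare SQL strings or (statement, parameters) tuples, executed in order. A hedged sketch of a custom builder following that contract (table and column names illustrative):

import pandas as pd

def my_replace_query_builder(data: pd.DataFrame):
    # First wipe the table, then bulk-insert the DataFrame rows as
    # parameter dictionaries -- the same pattern as the builders above.
    records = data.to_dict("records")
    return ["DELETE FROM example", ("INSERT INTO example VALUES (:foo, :bar)", records)]
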
@@ -72,20 +59,6 @@ class TestSQLDataNode:
         },
     ]
 
-    __modin_properties = [
-        {
-            "db_name": "taipy.sqlite3",
-            "db_engine": "sqlite",
-            "read_query": "SELECT * FROM example",
-            "write_query_builder": my_write_query_builder_with_modin,
-            "exposed_type": "modin",
-            "db_extra_args": {
-                "TrustServerCertificate": "yes",
-                "other": "value",
-            },
-        },
-    ]
-
     if util.find_spec("pyodbc"):
         __pandas_properties.append(
             {
@@ -100,20 +73,7 @@ class TestSQLDataNode:
                 },
             },
         )
-        __modin_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "mssql",
-                "read_query": "SELECT * FROM example",
-                "write_query_builder": my_write_query_builder_with_modin,
-                "exposed_type": "modin",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
+
 
     if util.find_spec("pymysql"):
         __pandas_properties.append(
@@ -129,20 +89,7 @@ class TestSQLDataNode:
                 },
             },
         )
-        __modin_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "mysql",
-                "read_query": "SELECT * FROM example",
-                "write_query_builder": my_write_query_builder_with_modin,
-                "exposed_type": "modin",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
+
 
     if util.find_spec("psycopg2"):
         __pandas_properties.append(
@@ -158,24 +105,10 @@ class TestSQLDataNode:
                 },
             },
         )
-        __modin_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "postgresql",
-                "read_query": "SELECT * FROM example",
-                "write_query_builder": my_write_query_builder_with_modin,
-                "exposed_type": "modin",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
+
 
     @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    @pytest.mark.parametrize("modin_properties", __modin_properties)
-    def test_create(self, pandas_properties, modin_properties):
+    def test_create(self, pandas_properties):
         dn = SQLDataNode(
             "foo_bar",
             Scope.SCENARIO,
@@ -193,24 +126,8 @@ class TestSQLDataNode:
         assert dn.read_query == "SELECT * FROM example"
         assert dn.write_query_builder == my_write_query_builder_with_pandas
 
-        dn = SQLDataNode(
-            "foo_bar",
-            Scope.SCENARIO,
-            properties=modin_properties,
-        )
-        assert isinstance(dn, SQLDataNode)
-        assert dn.storage_type() == "sql"
-        assert dn.config_id == "foo_bar"
-        assert dn.scope == Scope.SCENARIO
-        assert dn.id is not None
-        assert dn.owner_id is None
-        assert dn.job_ids == []
-        assert dn.is_ready_for_reading
-        assert dn.exposed_type == "modin"
-        assert dn.read_query == "SELECT * FROM example"
-        assert dn.write_query_builder == my_write_query_builder_with_modin
 
-    @pytest.mark.parametrize("properties", __pandas_properties + __modin_properties)
+    @pytest.mark.parametrize("properties", __pandas_properties)
     def test_get_user_properties(self, properties):
         custom_properties = properties.copy()
         custom_properties["foo"] = "bar"
@@ -241,8 +158,7 @@ class TestSQLDataNode:
             SQLDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
 
     @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    @pytest.mark.parametrize("modin_properties", __modin_properties)
-    def test_write_query_builder(self, pandas_properties, modin_properties):
+    def test_write_query_builder(self, pandas_properties):
         custom_properties = pandas_properties.copy()
         custom_properties.pop("db_extra_args")
         dn = SQLDataNode("foo_bar", Scope.SCENARIO, properties=custom_properties)
@@ -268,30 +184,6 @@ class TestSQLDataNode:
             assert len(engine_mock.mock_calls[4].args) == 1
             assert engine_mock.mock_calls[4].args[0].text == "DELETE FROM example"
 
-        custom_properties = modin_properties.copy()
-        custom_properties.pop("db_extra_args")
-        dn = SQLDataNode("foo_bar", Scope.SCENARIO, properties=custom_properties)
-        with patch("sqlalchemy.engine.Engine.connect") as engine_mock:
-            # mock connection execute
-            dn.write(modin_pd.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]}))
-            assert len(engine_mock.mock_calls[4].args) == 1
-            assert engine_mock.mock_calls[4].args[0].text == "DELETE FROM example"
-            assert len(engine_mock.mock_calls[5].args) == 2
-            assert engine_mock.mock_calls[5].args[0].text == "INSERT INTO example VALUES (:foo, :bar)"
-            assert engine_mock.mock_calls[5].args[1] == [
-                {"foo": 1, "bar": 4},
-                {"foo": 2, "bar": 5},
-                {"foo": 3, "bar": 6},
-            ]
-
-        custom_properties["write_query_builder"] = single_write_query_builder
-        dn = SQLDataNode("foo_bar", Scope.SCENARIO, properties=custom_properties)
-
-        with patch("sqlalchemy.engine.Engine.connect") as engine_mock:
-            # mock connection execute
-            dn.write(modin_pd.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]}))
-            assert len(engine_mock.mock_calls[4].args) == 1
-            assert engine_mock.mock_calls[4].args[0].text == "DELETE FROM example"
 
     @pytest.mark.parametrize(
         "tmp_sqlite_path",
@@ -337,29 +229,6 @@ class TestSQLDataNode:
         dn.append(append_data_1)
         assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True))
 
-    @pytest.mark.modin
-    def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path):
-        folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
-        properties = {
-            "db_engine": "sqlite",
-            "read_query": "SELECT * FROM example",
-            "write_query_builder": my_write_query_builder_with_pandas,
-            "append_query_builder": my_append_query_builder_with_pandas,
-            "db_name": db_name,
-            "sqlite_folder_path": folder_path,
-            "sqlite_file_extension": file_extension,
-            "exposed_type": "modin",
-        }
-
-        dn = SQLDataNode("sqlite_dn", Scope.SCENARIO, properties=properties)
-        original_data = modin_pd.DataFrame([{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}])
-        data = dn.read()
-        df_equals(data, original_data)
-
-        append_data_1 = modin_pd.DataFrame([{"foo": 5, "bar": 6}, {"foo": 7, "bar": 8}])
-        dn.append(append_data_1)
-        df_equals(dn.read(), modin_pd.concat([original_data, append_data_1]).reset_index(drop=True))
-
     def test_sqlite_append_without_append_query_builder(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {
@@ -431,63 +300,6 @@ class TestSQLDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path):
-        folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
-        properties = {
-            "db_engine": "sqlite",
-            "read_query": "SELECT * FROM example",
-            "write_query_builder": my_write_query_builder_with_modin,
-            "db_name": db_name,
-            "sqlite_folder_path": folder_path,
-            "sqlite_file_extension": file_extension,
-            "exposed_type": "modin",
-        }
-        dn = SQLDataNode("foo", Scope.SCENARIO, properties=properties)
-        dn.write(
-            pd.DataFrame(
-                [
-                    {"foo": 1, "bar": 1},
-                    {"foo": 1, "bar": 2},
-                    {"foo": 1, "bar": 3},
-                    {"foo": 2, "bar": 1},
-                    {"foo": 2, "bar": 2},
-                    {"foo": 2, "bar": 3},
-                ]
-            )
-        )
-
-        # Test datanode indexing and slicing
-        assert dn["foo"].equals(pd.Series([1, 1, 1, 2, 2, 2]))
-        assert dn["bar"].equals(pd.Series([1, 2, 3, 1, 2, 3]))
-        assert dn[:2].equals(modin_pd.DataFrame([{"foo": 1, "bar": 1}, {"foo": 1, "bar": 2}]))
-
-        # Test filter data
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["foo"] == 1]
-        expected_data = modin_pd.DataFrame([{"foo": 1, "bar": 1}, {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
-        filtered_by_indexing = dn[dn["foo"] != 1]
-        expected_data = modin_pd.DataFrame([{"foo": 2, "bar": 1}, {"foo": 2, "bar": 2}, {"foo": 2, "bar": 3}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)
-        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
-        expected_data = modin_pd.DataFrame(
-            [
-                {"foo": 1, "bar": 1},
-                {"foo": 1, "bar": 2},
-                {"foo": 2, "bar": 1},
-                {"foo": 2, "bar": 2},
-            ]
-        )
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
     def test_filter_numpy_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {

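The mock assertions kept in test_write_query_builder pin down the contract a write_query_builder must satisfy: it returns a list of SQL statements, each either a bare query string or a (query, parameters) pair. Below is a minimal sketch of such a builder in that spirit; the function name and the example table with foo and bar columns are illustrative, while the real helpers (my_write_query_builder_with_pandas, single_write_query_builder) live in the test module.

import pandas as pd

def write_query_builder(data: pd.DataFrame):
    # Clear the table, then bulk-insert the DataFrame rows as
    # named-parameter dictionaries, matching the engine_mock
    # assertions above ("DELETE FROM example" first, then the
    # parametrized INSERT).
    return [
        "DELETE FROM example",
        ("INSERT INTO example VALUES (:foo, :bar)", data.to_dict(orient="records")),
    ]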
+ 10 - 176
tests/core/data/test_sql_table_data_node.py

@@ -12,11 +12,9 @@
 from importlib import util
 from unittest.mock import patch
 
-import modin.pandas as modin_pd
 import numpy as np
 import pandas as pd
 import pytest
-from modin.pandas.test.utils import df_equals
 from pandas.testing import assert_frame_equal
 
 from taipy.config.common.scope import Scope
@@ -47,19 +45,6 @@ class TestSQLTableDataNode:
         },
     ]
 
-    __modin_properties = [
-        {
-            "db_name": "taipy",
-            "db_engine": "sqlite",
-            "table_name": "example",
-            "exposed_type": "modin",
-            "db_extra_args": {
-                "TrustServerCertificate": "yes",
-                "other": "value",
-            },
-        },
-    ]
-
     if util.find_spec("pyodbc"):
         __pandas_properties.append(
             {
@@ -73,19 +58,6 @@ class TestSQLTableDataNode:
                 },
             },
         )
-        __modin_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "mssql",
-                "table_name": "example",
-                "exposed_type": "modin",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
 
     if util.find_spec("pymysql"):
         __pandas_properties.append(
@@ -100,19 +72,6 @@ class TestSQLTableDataNode:
                 },
             },
         )
-        __modin_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "mysql",
-                "table_name": "example",
-                "exposed_type": "modin",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
 
     if util.find_spec("psycopg2"):
         __pandas_properties.append(
@@ -127,23 +86,9 @@ class TestSQLTableDataNode:
                 },
             },
         )
-        __modin_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "postgresql",
-                "table_name": "example",
-                "exposed_type": "modin",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
 
     @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    @pytest.mark.parametrize("modin_properties", __modin_properties)
-    def test_create(self, pandas_properties, modin_properties):
+    def test_create(self, pandas_properties):
         dn = SQLTableDataNode(
             "foo_bar",
             Scope.SCENARIO,
@@ -161,23 +106,6 @@ class TestSQLTableDataNode:
         assert dn.table_name == "example"
         assert dn._get_base_read_query() == "SELECT * FROM example"
 
-        dn = SQLTableDataNode(
-            "foo_bar",
-            Scope.SCENARIO,
-            properties=modin_properties,
-        )
-        assert isinstance(dn, SQLTableDataNode)
-        assert dn.storage_type() == "sql_table"
-        assert dn.config_id == "foo_bar"
-        assert dn.scope == Scope.SCENARIO
-        assert dn.id is not None
-        assert dn.owner_id is None
-        assert dn.job_ids == []
-        assert dn.is_ready_for_reading
-        assert dn.exposed_type == "modin"
-        assert dn.table_name == "example"
-        assert dn._get_base_read_query() == "SELECT * FROM example"
-
     @pytest.mark.parametrize("properties", __pandas_properties)
     def test_get_user_properties(self, properties):
         custom_properties = properties.copy()
@@ -204,20 +132,24 @@ class TestSQLTableDataNode:
         with pytest.raises(MissingRequiredProperty):
             SQLTableDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
 
+    @patch("taipy.core.data.sql_table.SQLTableDataNode._read_as_pandas_dataframe", return_value="pandas")
+    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
+    def test_modin_deprecated_in_favor_of_pandas(self, mock_read_as_pandas_dataframe, pandas_properties):
+        pandas_properties["exposed_type"] = "modin"
+        sql_data_node_as_modin = SQLTableDataNode("foo", Scope.SCENARIO, properties=pandas_properties)
+        assert sql_data_node_as_modin.properties["exposed_type"] == "pandas"
+        assert sql_data_node_as_modin.read() == "pandas"
+
     @patch("taipy.core.data.sql_table.SQLTableDataNode._read_as", return_value="custom")
     @patch("taipy.core.data.sql_table.SQLTableDataNode._read_as_pandas_dataframe", return_value="pandas")
-    @patch("taipy.core.data.sql_table.SQLTableDataNode._read_as_modin_dataframe", return_value="modin")
     @patch("taipy.core.data.sql_table.SQLTableDataNode._read_as_numpy", return_value="numpy")
     @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    @pytest.mark.parametrize("modin_properties", __modin_properties)
     def test_read(
         self,
         mock_read_as,
         mock_read_as_pandas_dataframe,
-        mock_read_as_modin_dataframe,
         mock_read_as_numpy,
         pandas_properties,
-        modin_properties,
     ):
         custom_properties = pandas_properties.copy()
         # Create SQLTableDataNode without exposed_type (Default is pandas.DataFrame)
@@ -241,10 +173,6 @@ class TestSQLTableDataNode:
 
         assert sql_data_source_as_numpy_object.read() == "numpy"
 
-        # Create the same SQLDataSource but with modin exposed_type
-        sql_data_source_as_modin_object = SQLTableDataNode("foo", Scope.SCENARIO, properties=modin_properties)
-        assert sql_data_source_as_modin_object.properties["exposed_type"] == "modin"
-        assert sql_data_source_as_modin_object.read() == "modin"
 
     @pytest.mark.parametrize("pandas_properties", __pandas_properties)
     def test_read_as(self, pandas_properties):
@@ -337,8 +265,7 @@ class TestSQLTableDataNode:
             SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
 
     @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    @pytest.mark.parametrize("modin_properties", __modin_properties)
-    def test_write_dataframe(self, pandas_properties, modin_properties):
+    def test_write_dataframe(self, pandas_properties):
         # test write pandas dataframe
         custom_properties = pandas_properties.copy()
         custom_properties.pop("db_extra_args")
@@ -355,22 +282,6 @@ class TestSQLTableDataNode:
                 dn.write(df)
                 assert mck.call_args[0][0].equals(df)
 
-        # test write modin dataframe
-        custom_properties = modin_properties.copy()
-        custom_properties.pop("db_extra_args")
-        dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
-
-        df = modin_pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
-        with patch("sqlalchemy.engine.Engine.connect") as engine_mock, patch(
-            "taipy.core.data.sql_table.SQLTableDataNode._create_table"
-        ):
-            cursor_mock = engine_mock.return_value.__enter__.return_value
-            cursor_mock.execute.side_effect = None
-
-            with patch("taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe") as mck:
-                dn.write(df)
-                assert mck.call_args[0][0].equals(df)
-
     @pytest.mark.parametrize(
         "data",
         [
@@ -465,27 +376,6 @@ class TestSQLTableDataNode:
         dn.append(append_data_1)
         assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True))
 
-    @pytest.mark.modin
-    def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path):
-        folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
-        properties = {
-            "db_engine": "sqlite",
-            "table_name": "example",
-            "db_name": db_name,
-            "sqlite_folder_path": folder_path,
-            "sqlite_file_extension": file_extension,
-            "exposed_type": "modin",
-        }
-
-        dn = SQLTableDataNode("sqlite_dn", Scope.SCENARIO, properties=properties)
-        original_data = modin_pd.DataFrame([{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}])
-        data = dn.read()
-        df_equals(data, original_data)
-
-        append_data_1 = modin_pd.DataFrame([{"foo": 5, "bar": 6}, {"foo": 7, "bar": 8}])
-        dn.append(append_data_1)
-        df_equals(dn.read(), modin_pd.concat([original_data, append_data_1]).reset_index(drop=True))
-
     def test_filter_pandas_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {
@@ -541,62 +431,6 @@ class TestSQLTableDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
-    @pytest.mark.modin
-    def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path):
-        folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
-        properties = {
-            "db_engine": "sqlite",
-            "table_name": "example",
-            "db_name": db_name,
-            "sqlite_folder_path": folder_path,
-            "sqlite_file_extension": file_extension,
-            "exposed_type": "modin",
-        }
-        dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=properties)
-        dn.write(
-            pd.DataFrame(
-                [
-                    {"foo": 1, "bar": 1},
-                    {"foo": 1, "bar": 2},
-                    {"foo": 1, "bar": 3},
-                    {"foo": 2, "bar": 1},
-                    {"foo": 2, "bar": 2},
-                    {"foo": 2, "bar": 3},
-                ]
-            )
-        )
-
-        # Test datanode indexing and slicing
-        assert dn["foo"].equals(pd.Series([1, 1, 1, 2, 2, 2]))
-        assert dn["bar"].equals(pd.Series([1, 2, 3, 1, 2, 3]))
-        assert dn[:2].equals(modin_pd.DataFrame([{"foo": 1, "bar": 1}, {"foo": 1, "bar": 2}]))
-
-        # Test filter data
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
-        filtered_by_indexing = dn[dn["foo"] == 1]
-        expected_data = modin_pd.DataFrame([{"foo": 1, "bar": 1}, {"foo": 1, "bar": 2}, {"foo": 1, "bar": 3}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
-        filtered_by_indexing = dn[dn["foo"] != 1]
-        expected_data = modin_pd.DataFrame([{"foo": 2, "bar": 1}, {"foo": 2, "bar": 2}, {"foo": 2, "bar": 3}])
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
-        filtered_by_filter_method = dn.filter([("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR)
-        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
-        expected_data = modin_pd.DataFrame(
-            [
-                {"foo": 1, "bar": 1},
-                {"foo": 1, "bar": 2},
-                {"foo": 2, "bar": 1},
-                {"foo": 2, "bar": 2},
-            ]
-        )
-        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
-        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
-
     def test_filter_numpy_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {

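The new test_modin_deprecated_in_favor_of_pandas case added above documents the migration path rather than a hard break: a data node configured with exposed_type "modin" is now treated as plain pandas. A minimal sketch of that fallback, assuming a hypothetical helper name (the actual normalization lives inside the data node implementation):

def _normalize_exposed_type(properties: dict) -> dict:
    # "modin" is deprecated: fall back to the pandas exposed type,
    # as asserted by test_modin_deprecated_in_favor_of_pandas.
    if properties.get("exposed_type") == "modin":
        properties["exposed_type"] = "pandas"
    return properties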
+ 1 - 1
tools/packages/taipy-core/setup.requirements.txt

@@ -1,7 +1,7 @@
 pyarrow>=10.0.1,<11.0
 networkx>=2.6,<3.0
 openpyxl>=3.1.2,<3.2
-modin[dask]>=0.23.1,<1.0
+pandas>=2.0.0,<3.0
 pymongo[srv]>=4.2.0,<5.0
 sqlalchemy>=2.0.16,<2.1
 toml>=0.10,<0.11
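With modin[dask] swapped for an explicit pandas pin, a data node that previously declared exposed_type "modin" keeps working unchanged; it simply reads back pandas objects. A usage sketch under assumed setup (the sqlite database and the example table are presumed to exist; names are illustrative):

from taipy.config.common.scope import Scope
from taipy.core.data.sql_table import SQLTableDataNode

properties = {
    "db_engine": "sqlite",
    "table_name": "example",
    "db_name": "taipy",        # assumed existing sqlite database
    "exposed_type": "modin",   # now coerced to "pandas" on creation
}
dn = SQLTableDataNode("example_dn", Scope.SCENARIO, properties=properties)
assert dn.properties["exposed_type"] == "pandas"
df = dn.read()  # returns a pandas.DataFrame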