Browse Source

Merge pull request #1431 from Avaiga/feature/#1344-refactor-code-to-support-polars-integration

feature/#1344 refactored to integrate polars
Toan Quach 10 months ago
parent
commit
0782ef0923
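
Note: the change that repeats across almost every file below is hoisting self.properties into a local variable before repeated use. In taipy, properties is a managed property rather than a plain attribute, so each access carries lookup overhead; reading it once per method avoids that. A minimal sketch of the pattern (illustrative class, not the real taipy API):

    class SketchDataNode:
        def __init__(self, props):
            self._props = dict(props)

        @property
        def properties(self):
            # stands in for taipy's managed property, whose access is not free
            return self._props

        def _read(self):
            properties = self.properties  # resolve once, reuse the local
            if properties["exposed_type"] == "pandas":
                return "pandas path"
            if properties["exposed_type"] == "numpy":
                return "numpy path"
            return "custom path"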

+ 1 - 0
.gitignore

@@ -86,6 +86,7 @@ dist/
 .taipy/
 user_data/
 .my_data/
+Data
 
 # demo files
 demo-*

+ 17 - 15
taipy/core/data/_abstract_sql.py

@@ -154,24 +154,25 @@ class _AbstractSQLDataNode(DataNode, _TabularDataNodeMixin):
         return self._engine
 
     def _conn_string(self) -> str:
-        engine = self.properties.get(self.__DB_ENGINE_KEY)
+        properties = self.properties
+        engine = properties.get(self.__DB_ENGINE_KEY)
 
         if self.__DB_USERNAME_KEY in self._ENGINE_REQUIRED_PROPERTIES[engine]:
-            username = self.properties.get(self.__DB_USERNAME_KEY)
+            username = properties.get(self.__DB_USERNAME_KEY)
             username = urllib.parse.quote_plus(username)
 
         if self.__DB_PASSWORD_KEY in self._ENGINE_REQUIRED_PROPERTIES[engine]:
-            password = self.properties.get(self.__DB_PASSWORD_KEY)
+            password = properties.get(self.__DB_PASSWORD_KEY)
             password = urllib.parse.quote_plus(password)
 
         if self.__DB_NAME_KEY in self._ENGINE_REQUIRED_PROPERTIES[engine]:
-            db_name = self.properties.get(self.__DB_NAME_KEY)
+            db_name = properties.get(self.__DB_NAME_KEY)
             db_name = urllib.parse.quote_plus(db_name)
 
-        host = self.properties.get(self.__DB_HOST_KEY, self.__DB_HOST_DEFAULT)
-        port = self.properties.get(self.__DB_PORT_KEY, self.__DB_PORT_DEFAULT)
-        driver = self.properties.get(self.__DB_DRIVER_KEY, self.__DB_DRIVER_DEFAULT)
-        extra_args = self.properties.get(self.__DB_EXTRA_ARGS_KEY, {})
+        host = properties.get(self.__DB_HOST_KEY, self.__DB_HOST_DEFAULT)
+        port = properties.get(self.__DB_PORT_KEY, self.__DB_PORT_DEFAULT)
+        driver = properties.get(self.__DB_DRIVER_KEY, self.__DB_DRIVER_DEFAULT)
+        extra_args = properties.get(self.__DB_EXTRA_ARGS_KEY, {})
 
         if driver:
             extra_args = {**extra_args, "driver": driver}
@@ -186,23 +187,24 @@ class _AbstractSQLDataNode(DataNode, _TabularDataNodeMixin):
         elif engine == self.__ENGINE_POSTGRESQL:
             return f"postgresql+psycopg2://{username}:{password}@{host}:{port}/{db_name}?{extra_args_str}"
         elif engine == self.__ENGINE_SQLITE:
-            folder_path = self.properties.get(self.__SQLITE_FOLDER_PATH, self.__SQLITE_FOLDER_PATH_DEFAULT)
-            file_extension = self.properties.get(self.__SQLITE_FILE_EXTENSION, self.__SQLITE_FILE_EXTENSION_DEFAULT)
+            folder_path = properties.get(self.__SQLITE_FOLDER_PATH, self.__SQLITE_FOLDER_PATH_DEFAULT)
+            file_extension = properties.get(self.__SQLITE_FILE_EXTENSION, self.__SQLITE_FILE_EXTENSION_DEFAULT)
             return "sqlite:///" + os.path.join(folder_path, f"{db_name}{file_extension}")
-
         raise UnknownDatabaseEngine(f"Unknown engine: {engine}")
 
     def filter(self, operators: Optional[Union[List, Tuple]] = None, join_operator=JoinOperator.AND):
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
+        properties = self.properties
+        if properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe(operators=operators, join_operator=join_operator)
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
+        if properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy(operators=operators, join_operator=join_operator)
         return self._read_as(operators=operators, join_operator=join_operator)
 
     def _read(self):
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
+        properties = self.properties
+        if properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe()
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
+        if properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy()
         return self._read_as()
 

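Note: with the properties local in place, _conn_string() builds the same URLs as before. A runnable sketch of the sqlite branch shown above, with illustrative values:

    import os

    folder_path, db_name, file_extension = ".data", "taipy", ".db"  # assumed values
    print("sqlite:///" + os.path.join(folder_path, f"{db_name}{file_extension}"))
    # sqlite:///.data/taipy.db
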
+ 2 - 3
taipy/core/data/_data_manager.py

@@ -32,11 +32,10 @@ from .data_node_id import DataNodeId
 
 
 class _DataManager(_Manager[DataNode], _VersionMixin):
-    __DATA_NODE_CLASS_MAP = DataNode._class_map()  # type: ignore
+    _DATA_NODE_CLASS_MAP = DataNode._class_map()  # type: ignore
     _ENTITY_NAME = DataNode.__name__
     _EVENT_ENTITY_TYPE = EventEntityType.DATA_NODE
     _repository: _DataFSRepository
-    __NAME_KEY = "name"
 
     @classmethod
     def _bulk_get_or_create(
@@ -102,7 +101,7 @@ class _DataManager(_Manager[DataNode], _VersionMixin):
             else:
                 storage_type = Config.sections[DataNodeConfig.name][_Config.DEFAULT_KEY].storage_type
 
-            return cls.__DATA_NODE_CLASS_MAP[storage_type](
+            return cls._DATA_NODE_CLASS_MAP[storage_type](
                 config_id=data_node_config.id,
                 scope=data_node_config.scope or DataNodeConfig._DEFAULT_SCOPE,
                 validity_period=data_node_config.validity_period,

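Note: renaming __DATA_NODE_CLASS_MAP to _DATA_NODE_CLASS_MAP (and the similar renames below) matters because double-underscore class attributes are name-mangled per class, which hides them from subclasses; a single underscore keeps them reachable, e.g. for a polars-aware manager. A sketch (class names hypothetical):

    class _Manager:
        __HIDDEN_MAP = {"csv": "CSVDataNode"}  # mangled to _Manager__HIDDEN_MAP
        _CLASS_MAP = {"csv": "CSVDataNode"}    # reachable from subclasses

    class _PolarsManager(_Manager):
        @classmethod
        def build(cls):
            # cls.__HIDDEN_MAP would mangle to _PolarsManager__HIDDEN_MAP and fail
            return cls._CLASS_MAP["csv"]

    print(_PolarsManager.build())  # CSVDataNode
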
+ 2 - 2
taipy/core/data/_tabular_datanode_mixin.py

@@ -26,7 +26,7 @@ class _TabularDataNodeMixin(object):
     _EXPOSED_TYPE_NUMPY = "numpy"
     _EXPOSED_TYPE_PANDAS = "pandas"
     _EXPOSED_TYPE_MODIN = "modin"  # Deprecated in favor of pandas since 3.1.0
-    __VALID_STRING_EXPOSED_TYPES = [_EXPOSED_TYPE_PANDAS, _EXPOSED_TYPE_NUMPY]
+    _VALID_STRING_EXPOSED_TYPES = [_EXPOSED_TYPE_PANDAS, _EXPOSED_TYPE_NUMPY]
 
     def __init__(self, **kwargs) -> None:
         self._decoder: Union[Callable[[List[Any]], Any], Callable[[Dict[Any, Any]], Any]]
@@ -66,7 +66,7 @@ class _TabularDataNodeMixin(object):
 
     @classmethod
     def _check_exposed_type(cls, exposed_type):
-        valid_string_exposed_types = cls.__VALID_STRING_EXPOSED_TYPES
+        valid_string_exposed_types = cls._VALID_STRING_EXPOSED_TYPES
         if isinstance(exposed_type, str) and exposed_type not in valid_string_exposed_types:
             raise InvalidExposedType(
                 f"Invalid string exposed type {exposed_type}. Supported values are "

+ 6 - 4
taipy/core/data/aws_s3.py

@@ -147,15 +147,17 @@ class S3ObjectDataNode(DataNode):
         return cls.__STORAGE_TYPE
 
     def _read(self):
+        properties = self.properties
         aws_s3_object = self._s3_client.get_object(
-            Bucket=self.properties[self.__AWS_STORAGE_BUCKET_NAME],
-            Key=self.properties[self.__AWS_S3_OBJECT_KEY],
+            Bucket=properties[self.__AWS_STORAGE_BUCKET_NAME],
+            Key=properties[self.__AWS_S3_OBJECT_KEY],
         )
         return aws_s3_object["Body"].read().decode("utf-8")
 
     def _write(self, data: Any):
+        properties = self.properties
         self._s3_client.put_object(
-            Bucket=self.properties[self.__AWS_STORAGE_BUCKET_NAME],
-            Key=self.properties[self.__AWS_S3_OBJECT_KEY],
+            Bucket=properties[self.__AWS_STORAGE_BUCKET_NAME],
+            Key=properties[self.__AWS_S3_OBJECT_KEY],
             Body=data,
         )

+ 32 - 30
taipy/core/data/csv.py

@@ -137,15 +137,17 @@ class CSVDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         return cls.__STORAGE_TYPE
 
     def _read(self):
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
+        properties = self.properties
+        if properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe()
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
+        if properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy()
         return self._read_as()
 
     def _read_as(self):
-        with open(self._path, encoding=self.properties[self.__ENCODING_KEY]) as csvFile:
-            if self.properties[self._HAS_HEADER_PROPERTY]:
+        properties = self.properties
+        with open(self._path, encoding=properties[self.__ENCODING_KEY]) as csvFile:
+            if properties[self._HAS_HEADER_PROPERTY]:
                 reader = csv.DictReader(csvFile)
             else:
                 reader = csv.reader(csvFile)
@@ -159,37 +161,40 @@ class CSVDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         self, usecols: Optional[List[int]] = None, column_names: Optional[List[str]] = None
     ) -> pd.DataFrame:
         try:
-            if self.properties[self._HAS_HEADER_PROPERTY]:
+            properties = self.properties
+            if properties[self._HAS_HEADER_PROPERTY]:
                 if column_names:
-                    return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])[column_names]
-                return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY])
+                    return pd.read_csv(self._path, encoding=properties[self.__ENCODING_KEY])[column_names]
+                return pd.read_csv(self._path, encoding=properties[self.__ENCODING_KEY])
             else:
                 if usecols:
                     return pd.read_csv(
-                        self._path, encoding=self.properties[self.__ENCODING_KEY], header=None, usecols=usecols
+                        self._path, encoding=properties[self.__ENCODING_KEY], header=None, usecols=usecols
                     )
-                return pd.read_csv(self._path, encoding=self.properties[self.__ENCODING_KEY], header=None)
+                return pd.read_csv(self._path, encoding=properties[self.__ENCODING_KEY], header=None)
         except pd.errors.EmptyDataError:
             return pd.DataFrame()
 
     def _append(self, data: Any):
-        if isinstance(data, pd.DataFrame):
-            data.to_csv(self._path, mode="a", index=False, encoding=self.properties[self.__ENCODING_KEY], header=False)
-        else:
-            pd.DataFrame(data).to_csv(
-                self._path, mode="a", index=False, encoding=self.properties[self.__ENCODING_KEY], header=False
-            )
-
-    def _write(self, data: Any):
-        exposed_type = self.properties[self._EXPOSED_TYPE_PROPERTY]
-        if self.properties[self._HAS_HEADER_PROPERTY]:
-            self._convert_data_to_dataframe(exposed_type, data).to_csv(
-                self._path, index=False, encoding=self.properties[self.__ENCODING_KEY]
-            )
-        else:
-            self._convert_data_to_dataframe(exposed_type, data).to_csv(
-                self._path, index=False, encoding=self.properties[self.__ENCODING_KEY], header=False
-            )
+        properties = self.properties
+        exposed_type = properties[self._EXPOSED_TYPE_PROPERTY]
+        data = self._convert_data_to_dataframe(exposed_type, data)
+        data.to_csv(self._path, mode="a", index=False, encoding=properties[self.__ENCODING_KEY], header=False)
+
+    def _write(self, data: Any, columns: Optional[List[str]] = None):
+        properties = self.properties
+        exposed_type = properties[self._EXPOSED_TYPE_PROPERTY]
+        data = self._convert_data_to_dataframe(exposed_type, data)
+
+        if columns and isinstance(data, pd.DataFrame):
+            data.columns = pd.Index(columns, dtype="object")
+
+        data.to_csv(
+            self._path,
+            index=False,
+            encoding=properties[self.__ENCODING_KEY],
+            header=properties[self._HAS_HEADER_PROPERTY],
+        )
 
     def write_with_column_names(self, data: Any, columns: Optional[List[str]] = None, job_id: Optional[JobId] = None):
         """Write a selection of columns.
@@ -199,8 +204,5 @@ class CSVDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
             columns (Optional[List[str]]): The list of column names to write.
             job_id (JobId^): An optional identifier of the writer.
         """
-        df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
-        if columns and isinstance(df, pd.DataFrame):
-            df.columns = pd.Index(columns, dtype="object")
-        df.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY])
+        self._write(data, columns)
         self.track_edit(timestamp=datetime.now(), job_id=job_id)

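Note: _write now owns the whole CSV path: it accepts an optional columns list, renames columns only when the converted data is a DataFrame, and folds the old has_header if/else into to_csv's header= argument, so write_with_column_names reduces to a delegation plus edit tracking. The folding in isolation (illustrative data):

    import pandas as pd

    df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
    for has_header in (True, False):
        df.to_csv("example_out.csv", index=False, header=has_header)  # no branch needed
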
+ 49 - 32
taipy/core/data/excel.py

@@ -150,20 +150,31 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
                 _TabularDataNodeMixin._check_exposed_type(t)
 
     def _read(self):
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
+        exposed_type = self.properties[self._EXPOSED_TYPE_PROPERTY]
+        if exposed_type == self._EXPOSED_TYPE_PANDAS:
             return self._read_as_pandas_dataframe()
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
+        if exposed_type == self._EXPOSED_TYPE_NUMPY:
             return self._read_as_numpy()
         return self._read_as()
 
+    def _read_sheet_with_exposed_type(
+        self, sheet_exposed_type: str, sheet_name: str
+    ) -> Optional[Union[np.ndarray, pd.DataFrame]]:
+        if sheet_exposed_type == self._EXPOSED_TYPE_NUMPY:
+            return self._read_as_pandas_dataframe(sheet_name).to_numpy()  # type: ignore
+        elif sheet_exposed_type == self._EXPOSED_TYPE_PANDAS:
+            return self._read_as_pandas_dataframe(sheet_name)
+        return None
+
     def _read_as(self):
         try:
+            properties = self.properties
             excel_file = load_workbook(self._path)
-            exposed_type = self.properties[self._EXPOSED_TYPE_PROPERTY]
+            exposed_type = properties[self._EXPOSED_TYPE_PROPERTY]
             work_books = {}
             sheet_names = excel_file.sheetnames
 
-            user_provided_sheet_names = self.properties.get(self.__SHEET_NAME_PROPERTY) or []
+            user_provided_sheet_names = properties.get(self.__SHEET_NAME_PROPERTY) or []
             if not isinstance(user_provided_sheet_names, (List, Set, Tuple)):
                 user_provided_sheet_names = [user_provided_sheet_names]
 
@@ -174,10 +185,9 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
                     raise NonExistingExcelSheet(sheet_name, self._path)
 
             if isinstance(exposed_type, List):
-                if len(provided_sheet_names) != len(self.properties[self._EXPOSED_TYPE_PROPERTY]):
+                if len(provided_sheet_names) != len(exposed_type):
                     raise ExposedTypeLengthMismatch(
-                        f"Expected {len(provided_sheet_names)} exposed types, got "
-                        f"{len(self.properties[self._EXPOSED_TYPE_PROPERTY])}"
+                        f"Expected {len(provided_sheet_names)} exposed types, got " f"{len(exposed_type)}"
                     )
 
             for i, sheet_name in enumerate(provided_sheet_names):
@@ -191,14 +201,13 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
                         sheet_exposed_type = exposed_type[i]
 
                     if isinstance(sheet_exposed_type, str):
-                        if sheet_exposed_type == self._EXPOSED_TYPE_NUMPY:
-                            work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name).to_numpy()
-                        elif sheet_exposed_type == self._EXPOSED_TYPE_PANDAS:
-                            work_books[sheet_name] = self._read_as_pandas_dataframe(sheet_name)
+                        sheet_data = self._read_sheet_with_exposed_type(sheet_exposed_type, sheet_name)
+                        if sheet_data is not None:
+                            work_books[sheet_name] = sheet_data
                         continue
 
                 res = [[col.value for col in row] for row in work_sheet.rows]
-                if self.properties[self._HAS_HEADER_PROPERTY] and res:
+                if properties[self._HAS_HEADER_PROPERTY] and res:
                     header = res.pop(0)
                     for i, row in enumerate(res):
                         res[i] = sheet_exposed_type(**dict([[h, r] for h, r in zip(header, row)]))
@@ -225,9 +234,10 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
 
     def __get_sheet_names_and_header(self, sheet_names):
         kwargs = {}
+        properties = self.properties
         if sheet_names is None:
-            sheet_names = self.properties[self.__SHEET_NAME_PROPERTY]
-        if not self.properties[self._HAS_HEADER_PROPERTY]:
+            sheet_names = properties[self.__SHEET_NAME_PROPERTY]
+        if not properties[self._HAS_HEADER_PROPERTY]:
             kwargs["header"] = None
         return sheet_names, kwargs
 
@@ -238,7 +248,7 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         except pd.errors.EmptyDataError:
             return pd.DataFrame()
 
-    def __append_excel_with_single_sheet(self, append_excel_fct, *args, **kwargs):
+    def _append_excel_with_single_sheet(self, append_excel_fct, *args, **kwargs):
         sheet_name = self.properties.get(self.__SHEET_NAME_PROPERTY)
 
         with pd.ExcelWriter(self._path, mode="a", engine="openpyxl", if_sheet_exists="overlay") as writer:
@@ -252,7 +262,12 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
                 sheet_name = list(writer.sheets.keys())[0]
                 append_excel_fct(writer, *args, **kwargs, startrow=writer.sheets[sheet_name].max_row)
 
-    def __append_excel_with_multiple_sheets(self, data: Any, columns: List[str] = None):
+    def _set_column_if_dataframe(self, data: Any, columns) -> Union[pd.DataFrame, Any]:
+        if isinstance(data, pd.DataFrame):
+            data.columns = pd.Index(columns, dtype="object")
+        return data
+
+    def _append_excel_with_multiple_sheets(self, data: Any, columns: List[str] = None):
         with pd.ExcelWriter(self._path, mode="a", engine="openpyxl", if_sheet_exists="overlay") as writer:
             # Each key stands for a sheet name
             for sheet_name in data.keys():
@@ -262,7 +277,7 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
                     df = data[sheet_name]
 
                 if columns:
-                    data[sheet_name].columns = columns
+                    df = self._set_column_if_dataframe(df, columns)
 
                 df.to_excel(
                     writer, sheet_name=sheet_name, index=False, header=False, startrow=writer.sheets[sheet_name].max_row
@@ -275,13 +290,13 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
             raise ImportError("The append method is only available for pandas version 1.4 or higher.")
 
         if isinstance(data, Dict) and all(isinstance(x, (pd.DataFrame, np.ndarray)) for x in data.values()):
-            self.__append_excel_with_multiple_sheets(data)
+            self._append_excel_with_multiple_sheets(data)
         elif isinstance(data, pd.DataFrame):
-            self.__append_excel_with_single_sheet(data.to_excel, index=False, header=False)
+            self._append_excel_with_single_sheet(data.to_excel, index=False, header=False)
         else:
-            self.__append_excel_with_single_sheet(pd.DataFrame(data).to_excel, index=False, header=False)
+            self._append_excel_with_single_sheet(pd.DataFrame(data).to_excel, index=False, header=False)
 
-    def __write_excel_with_single_sheet(self, write_excel_fct, *args, **kwargs):
+    def _write_excel_with_single_sheet(self, write_excel_fct, *args, **kwargs):
         if sheet_name := self.properties.get(self.__SHEET_NAME_PROPERTY):
             if not isinstance(sheet_name, str):
                 if len(sheet_name) > 1:
@@ -292,24 +307,26 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         else:
             write_excel_fct(*args, **kwargs)
 
-    def __write_excel_with_multiple_sheets(self, data: Any, columns: List[str] = None):
+    def _write_excel_with_multiple_sheets(self, data: Any, columns: List[str] = None):
         with pd.ExcelWriter(self._path) as writer:
             # Each key stands for a sheet name
+            properties = self.properties
             for key in data.keys():
-                df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data[key])
+                df = self._convert_data_to_dataframe(properties[self._EXPOSED_TYPE_PROPERTY], data[key])
 
                 if columns:
-                    data[key].columns = columns
+                    df = self._set_column_if_dataframe(df, columns)
 
-                df.to_excel(writer, key, index=False, header=self.properties[self._HAS_HEADER_PROPERTY] or False)
+                df.to_excel(writer, key, index=False, header=properties[self._HAS_HEADER_PROPERTY] or False)
 
     def _write(self, data: Any):
         if isinstance(data, Dict):
-            return self.__write_excel_with_multiple_sheets(data)
+            return self._write_excel_with_multiple_sheets(data)
         else:
-            data = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
-            self.__write_excel_with_single_sheet(
-                data.to_excel, self._path, index=False, header=self.properties[self._HAS_HEADER_PROPERTY] or None
+            properties = self.properties
+            data = self._convert_data_to_dataframe(properties[self._EXPOSED_TYPE_PROPERTY], data)
+            self._write_excel_with_single_sheet(
+                data.to_excel, self._path, index=False, header=properties[self._HAS_HEADER_PROPERTY] or None
             )
 
     def write_with_column_names(self, data: Any, columns: List[str] = None, job_id: Optional[JobId] = None):
@@ -321,10 +338,10 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
             job_id (JobId^): An optional identifier of the writer.
         """
         if isinstance(data, Dict) and all(isinstance(x, (pd.DataFrame, np.ndarray)) for x in data.values()):
-            self.__write_excel_with_multiple_sheets(data, columns=columns)
+            self._write_excel_with_multiple_sheets(data, columns=columns)
         else:
             df = pd.DataFrame(data)
             if columns:
-                df.columns = pd.Index(columns, dtype="object")
-            self.__write_excel_with_single_sheet(df.to_excel, self.path, index=False)
+                df = self._set_column_if_dataframe(df, columns)
+            self._write_excel_with_single_sheet(df.to_excel, self.path, index=False)
         self.track_edit(timestamp=datetime.now(), job_id=job_id)

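Note: _set_column_if_dataframe centralizes a guard the old code lacked: data[sheet_name].columns = columns assigned to whatever object the caller passed, while the helper only renames when the converted value really is a DataFrame and leaves numpy arrays untouched. The helper in isolation:

    import numpy as np
    import pandas as pd

    def _set_column_if_dataframe(data, columns):
        if isinstance(data, pd.DataFrame):
            data.columns = pd.Index(columns, dtype="object")
        return data

    print(_set_column_if_dataframe(pd.DataFrame([[1, 2]]), ["a", "b"]).columns.tolist())  # ['a', 'b']
    print(_set_column_if_dataframe(np.array([[1, 2]]), ["a", "b"]))  # ndarray passes through
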
+ 7 - 5
taipy/core/data/generic.py

@@ -108,7 +108,7 @@ class GenericDataNode(DataNode):
             editor_expiration_date,
             **properties,
         )
-        if not self._last_edit_date:
+        if not self._last_edit_date:  # type: ignore
            self._last_edit_date = datetime.now()
 
         self._TAIPY_PROPERTIES.update(
@@ -125,8 +125,9 @@ class GenericDataNode(DataNode):
         return cls.__STORAGE_TYPE
 
     def _read(self):
-        if read_fct := self.properties[self._OPTIONAL_READ_FUNCTION_PROPERTY]:
-            if read_fct_args := self.properties.get(self.__READ_FUNCTION_ARGS_PROPERTY, None):
+        properties = self.properties
+        if read_fct := properties[self._OPTIONAL_READ_FUNCTION_PROPERTY]:
+            if read_fct_args := properties.get(self.__READ_FUNCTION_ARGS_PROPERTY, None):
                 if not isinstance(read_fct_args, list):
                     return read_fct(*[read_fct_args])
                 return read_fct(*read_fct_args)
@@ -134,8 +135,9 @@ class GenericDataNode(DataNode):
         raise MissingReadFunction(f"The read function is not defined in data node config {self.config_id}.")
 
     def _write(self, data: Any):
-        if write_fct := self.properties[self._OPTIONAL_WRITE_FUNCTION_PROPERTY]:
-            if write_fct_args := self.properties.get(self.__WRITE_FUNCTION_ARGS_PROPERTY, None):
+        properties = self.properties
+        if write_fct := properties[self._OPTIONAL_WRITE_FUNCTION_PROPERTY]:
+            if write_fct_args := properties.get(self.__WRITE_FUNCTION_ARGS_PROPERTY, None):
                 if not isinstance(write_fct_args, list):
                     return write_fct(data, *[write_fct_args])
                 return write_fct(data, *write_fct_args)

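Note: the _read/_write bodies above also show the argument-wrapping contract: a single non-list read_fct_args/write_fct_args value is wrapped in a list before the call. A sketch of configuring such a node (kwarg names assumed from the private property keys above):

    from taipy.config import Config

    def read_fct(path):
        with open(path) as f:
            return f.read()

    def write_fct(data, path):
        with open(path, "w") as f:
            f.write(data)

    cfg = Config.configure_generic_data_node(
        id="my_generic",
        read_fct=read_fct, read_fct_args="data.txt",    # single arg, wrapped for us
        write_fct=write_fct, write_fct_args="data.txt",  # assumed kwarg names
    )
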
+ 22 - 13
taipy/core/data/parquet.py

@@ -11,7 +11,7 @@
 
 from datetime import datetime, timedelta
 from os.path import isdir, isfile
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Dict, List, Optional, Set, Union
 
 import numpy as np
 import pandas as pd
@@ -157,7 +157,10 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         with _Reloader():
             self._write_default_data(default_value)
 
-        if not self._last_edit_date and (isfile(self._path) or isdir(self._path)):
+        if (
+            not self._last_edit_date  # type: ignore
+            and (isfile(self._path) or isdir(self._path[:-1] if self._path.endswith("*") else self._path))
+        ):
             self._last_edit_date = datetime.now()
         self._TAIPY_PROPERTIES.update(
             {
@@ -208,14 +211,15 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
             **write_kwargs (dict[str, any]): The keyword arguments passed to the function
                 `pandas.DataFrame.to_parquet()`.
         """
+        properties = self.properties
         kwargs = {
-            self.__ENGINE_PROPERTY: self.properties[self.__ENGINE_PROPERTY],
-            self.__COMPRESSION_PROPERTY: self.properties[self.__COMPRESSION_PROPERTY],
+            self.__ENGINE_PROPERTY: properties[self.__ENGINE_PROPERTY],
+            self.__COMPRESSION_PROPERTY: properties[self.__COMPRESSION_PROPERTY],
         }
-        kwargs.update(self.properties[self.__WRITE_KWARGS_PROPERTY])
+        kwargs.update(properties[self.__WRITE_KWARGS_PROPERTY])
         kwargs.update(write_kwargs)
 
-        df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
+        df = self._convert_data_to_dataframe(properties[self._EXPOSED_TYPE_PROPERTY], data)
         if isinstance(df, pd.Series):
             df = pd.DataFrame(df)
 
@@ -240,16 +244,21 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
             )
             return None
 
-        kwargs = self.properties[self.__READ_KWARGS_PROPERTY]
+        properties = self.properties
+        exposed_type = properties[self._EXPOSED_TYPE_PROPERTY]
+        kwargs = properties[self.__READ_KWARGS_PROPERTY]
         kwargs.update(
             {
-                self.__ENGINE_PROPERTY: self.properties[self.__ENGINE_PROPERTY],
+                self.__ENGINE_PROPERTY: properties[self.__ENGINE_PROPERTY],
             }
         )
         kwargs.update(read_kwargs)
 
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_PANDAS:
-            return self._read_as_pandas_dataframe(kwargs)
-        if self.properties[self._EXPOSED_TYPE_PROPERTY] == self._EXPOSED_TYPE_NUMPY:
-            return self._read_as_numpy(kwargs)
-        return self._read_as(kwargs)
+        return self._do_read_with_kwargs(exposed_type, kwargs)
+
+    def _do_read_with_kwargs(self, exposed_type, read_kwargs) -> Union[pd.DataFrame, np.ndarray, List]:
+        if exposed_type == self._EXPOSED_TYPE_PANDAS:
+            return self._read_as_pandas_dataframe(read_kwargs)
+        if exposed_type == self._EXPOSED_TYPE_NUMPY:
+            return self._read_as_numpy(read_kwargs)
+        return self._read_as(read_kwargs)

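Note: the reworked last-edit-date check accounts for glob-style paths: a configured path such as "data/parquet/*" is not a file or directory itself, so the trailing "*" is stripped before the isdir test. The check in isolation:

    from os.path import isdir, isfile

    def path_exists(path: str) -> bool:
        # mirrors the condition above: "data/parquet/*" is tested as "data/parquet/"
        return isfile(path) or isdir(path[:-1] if path.endswith("*") else path)

    print(path_exists("data/parquet/*"))
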
+ 3 - 2
taipy/core/data/sql.py

@@ -131,10 +131,11 @@ class SQLDataNode(_AbstractSQLDataNode):
         return self.properties.get(self.__READ_QUERY_KEY)
 
     def _do_append(self, data, engine, connection) -> None:
-        if not self.properties.get(self._APPEND_QUERY_BUILDER_KEY):
+        append_query_builder_fct = self.properties.get(self._APPEND_QUERY_BUILDER_KEY)
+        if not append_query_builder_fct:
             raise MissingAppendQueryBuilder
 
-        queries = self.properties.get(self._APPEND_QUERY_BUILDER_KEY)(data)
+        queries = append_query_builder_fct(data)
         self.__execute_queries(queries, connection)
 
     def _do_write(self, data, engine, connection) -> None:

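Note: _do_append previously looked the builder up twice, once for the existence check and once for the call; it now fetches the callable a single time. A sketch of such a builder (shape illustrative; taipy executes whatever statements it returns):

    def append_query_builder(data):
        # build one INSERT per appended row; 'example' is a placeholder table
        return [
            f"INSERT INTO example (a, b) VALUES ({row['a']}, {row['b']})"
            for row in data
        ]

    print(append_query_builder([{"a": 1, "b": 2}]))
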
+ 5 - 5
taipy/core/data/sql_table.py

@@ -122,7 +122,7 @@ class SQLTableDataNode(_AbstractSQLDataNode):
 
     def __insert_data(self, data, engine, connection, delete_table: bool = False) -> None:
         table = self._create_table(engine)
-        self.__insert_dataframe(
+        self._insert_dataframe(
             self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data),
             table,
             connection,
@@ -137,7 +137,7 @@ class SQLTableDataNode(_AbstractSQLDataNode):
         )
 
     @classmethod
-    def __insert_dicts(cls, data: List[Dict], table: Any, connection: Any, delete_table: bool) -> None:
+    def _insert_dicts(cls, data: List[Dict], table: Any, connection: Any, delete_table: bool) -> None:
         """
         This method will insert the data contained in a list of dictionaries into a table. The query itself is handled
         by SQLAlchemy, so it's only needed to pass the correct data type.
@@ -146,14 +146,14 @@ class SQLTableDataNode(_AbstractSQLDataNode):
         connection.execute(table.insert(), data)
 
     @classmethod
-    def __insert_dataframe(
+    def _insert_dataframe(
         cls, df: Union[pd.DataFrame, pd.Series], table: Any, connection: Any, delete_table: bool
-    ) -> None:
+        ) -> None:
         if isinstance(df, pd.Series):
             data = [df.to_dict()]
         elif isinstance(df, pd.DataFrame):
             data = df.to_dict(orient="records")
-        cls.__insert_dicts(data, table, connection, delete_table)
+        cls._insert_dicts(data, table, connection, delete_table)
 
     @classmethod
     def __delete_all_rows(cls, table: Any, connection: Any, delete_table: bool) -> None:

+ 4 - 6
tests/core/data/test_aws_s3_data_node.py

@@ -13,7 +13,9 @@ import boto3
 import pytest
 from moto import mock_s3
 
+from taipy.config import Config
 from taipy.config.common.scope import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.aws_s3 import S3ObjectDataNode
 
 
@@ -29,14 +31,10 @@ class TestS3ObjectDataNode:
         }
     ]
 
-    @mock_s3
     @pytest.mark.parametrize("properties", __properties)
     def test_create(self, properties):
-        aws_s3_object_dn = S3ObjectDataNode(
-            "foo_bar_aws_s3",
-            Scope.SCENARIO,
-            properties=properties,
-        )
+        s3_object_dn_config = Config.configure_s3_object_data_node(id="foo_bar_aws_s3", **properties)
+        aws_s3_object_dn = _DataManagerFactory._build_manager()._create_and_set(s3_object_dn_config, None, None)
         assert isinstance(aws_s3_object_dn, S3ObjectDataNode)
         assert aws_s3_object_dn.storage_type() == "s3_object"
         assert aws_s3_object_dn.config_id == "foo_bar_aws_s3"

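Note: the test files below all move from instantiating DataNode subclasses directly to the configure-then-build pattern, so nodes come out of the manager the same way they would in an application. The pattern in isolation (the two None arguments are presumably the owner and parent identifiers):

    from taipy.config import Config
    from taipy.core.data._data_manager_factory import _DataManagerFactory

    cfg = Config.configure_csv_data_node(id="sales", default_path="sales.csv")  # illustrative config
    dn = _DataManagerFactory._build_manager()._create_and_set(cfg, None, None)
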
+ 26 - 5
tests/core/data/test_csv_data_node.py

@@ -9,6 +9,7 @@
 # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
 
+import dataclasses
 import os
 import pathlib
 import uuid
@@ -22,6 +23,7 @@ from taipy.config.common.scope import Scope
 from taipy.config.config import Config
 from taipy.config.exceptions.exceptions import InvalidConfigurationId
 from taipy.core.data._data_manager import _DataManager
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.csv import CSVDataNode
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.exceptions.exceptions import InvalidExposedType
@@ -35,12 +37,20 @@ def cleanup():
         os.remove(path)
 
 
+@dataclasses.dataclass
+class MyCustomObject:
+    id: int
+    integer: int
+    text: str
+
+
 class TestCSVDataNode:
     def test_create(self):
-        path = "data/node/path"
-        dn = CSVDataNode(
-            "foo_bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "name": "super name"}
+        default_path = "data/node/path"
+        csv_dn_config = Config.configure_csv_data_node(
+            id="foo_bar", default_path=default_path, has_header=False, name="super name"
         )
+        dn = _DataManagerFactory._build_manager()._create_and_set(csv_dn_config, None, None)
         assert isinstance(dn, CSVDataNode)
         assert dn.storage_type() == "csv"
         assert dn.config_id == "foo_bar"
@@ -51,12 +61,23 @@ class TestCSVDataNode:
         assert dn.last_edit_date is None
         assert dn.job_ids == []
         assert not dn.is_ready_for_reading
-        assert dn.path == path
+        assert dn.path == default_path
         assert dn.has_header is False
         assert dn.exposed_type == "pandas"
 
+        csv_dn_config = Config.configure_csv_data_node(
+            id="foo", default_path=default_path, has_header=True, exposed_type=MyCustomObject
+        )
+        dn = _DataManagerFactory._build_manager()._create_and_set(csv_dn_config, None, None)
+        assert dn.storage_type() == "csv"
+        assert dn.config_id == "foo"
+        assert dn.has_header is True
+        assert dn.exposed_type == MyCustomObject
+
         with pytest.raises(InvalidConfigurationId):
-            CSVDataNode("foo bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "name": "super name"})
+            CSVDataNode(
+                "foo bar", Scope.SCENARIO, properties={"path": default_path, "has_header": False, "name": "super name"}
+            )
 
     def test_modin_deprecated_in_favor_of_pandas(self):
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")

+ 46 - 5
tests/core/data/test_excel_data_node.py

@@ -23,6 +23,7 @@ import pytest
 from taipy.config.common.scope import Scope
 from taipy.config.config import Config
 from taipy.core.data._data_manager import _DataManager
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.excel import ExcelDataNode
 from taipy.core.exceptions.exceptions import (
@@ -75,11 +76,10 @@ class TestExcelDataNode:
     def test_create(self):
         path = "data/node/path"
         sheet_names = ["sheet_name_1", "sheet_name_2"]
-        dn = ExcelDataNode(
-            "foo_bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": sheet_names, "name": "super name"},
+        excel_dn_config = Config.configure_excel_data_node(
+            id="foo_bar", default_path=path, has_header=False, sheet_name="Sheet1", name="super name"
         )
+        dn = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config, None, None)
         assert isinstance(dn, ExcelDataNode)
         assert dn.storage_type() == "excel"
         assert dn.config_id == "foo_bar"
@@ -93,7 +93,48 @@ class TestExcelDataNode:
         assert not dn.is_ready_for_reading
         assert dn.path == path
         assert dn.has_header is False
-        assert dn.sheet_name == sheet_names
+        assert dn.sheet_name == "Sheet1"
+
+        excel_dn_config_1 = Config.configure_excel_data_node(
+            id="baz", default_path=path, has_header=True, sheet_name="Sheet1", exposed_type=MyCustomObject
+        )
+        dn_1 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_1, None, None)
+        assert isinstance(dn_1, ExcelDataNode)
+        assert dn_1.has_header is True
+        assert dn_1.sheet_name == "Sheet1"
+        assert dn_1.exposed_type == MyCustomObject
+
+        excel_dn_config_2 = Config.configure_excel_data_node(
+            id="baz",
+            default_path=path,
+            has_header=True,
+            sheet_name=sheet_names,
+            exposed_type={"Sheet1": "pandas", "Sheet2": "numpy"},
+        )
+        dn_2 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_2, None, None)
+        assert isinstance(dn_2, ExcelDataNode)
+        assert dn_2.sheet_name == sheet_names
+        assert dn_2.exposed_type == {"Sheet1": "pandas", "Sheet2": "numpy"}
+
+        excel_dn_config_3 = Config.configure_excel_data_node(
+            id="baz", default_path=path, has_header=True, sheet_name=sheet_names, exposed_type=MyCustomObject
+        )
+        dn_3 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_3, None, None)
+        assert isinstance(dn_3, ExcelDataNode)
+        assert dn_3.sheet_name == sheet_names
+        assert dn_3.exposed_type == MyCustomObject
+
+        excel_dn_config_4 = Config.configure_excel_data_node(
+            id="baz",
+            default_path=path,
+            has_header=True,
+            sheet_name=sheet_names,
+            exposed_type={"Sheet1": MyCustomObject, "Sheet2": MyCustomObject2},
+        )
+        dn_4 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_4, None, None)
+        assert isinstance(dn_4, ExcelDataNode)
+        assert dn_4.sheet_name == sheet_names
+        assert dn_4.exposed_type == {"Sheet1": MyCustomObject, "Sheet2": MyCustomObject2}
 
     def test_get_user_properties(self, excel_file):
         dn_1 = ExcelDataNode("dn_1", Scope.SCENARIO, properties={"path": "data/node/path"})

+ 0 - 55
tests/core/data/test_filter_sql_table_data_node.py

@@ -9,7 +9,6 @@
 # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
 
-from importlib import util
 from unittest.mock import patch
 
 import numpy as np
@@ -30,60 +29,6 @@ class MyCustomObject:
 
 
 class TestFilterSQLTableDataNode:
-    __pandas_properties = [
-        {
-            "db_name": "taipy",
-            "db_engine": "sqlite",
-            "table_name": "example",
-            "db_extra_args": {
-                "TrustServerCertificate": "yes",
-                "other": "value",
-            },
-        },
-    ]
-
-    if util.find_spec("pyodbc"):
-        __pandas_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "mssql",
-                "table_name": "example",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
-
-    if util.find_spec("pymysql"):
-        __pandas_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "mysql",
-                "table_name": "example",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
-
-    if util.find_spec("psycopg2"):
-        __pandas_properties.append(
-            {
-                "db_username": "sa",
-                "db_password": "Passw0rd",
-                "db_name": "taipy",
-                "db_engine": "postgresql",
-                "table_name": "example",
-                "db_extra_args": {
-                    "TrustServerCertificate": "yes",
-                },
-            },
-        )
-
     def test_filter_pandas_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {

+ 73 - 61
tests/core/data/test_generic_data_node.py

@@ -11,8 +11,10 @@
 
 import pytest
 
+from taipy.config import Config
 from taipy.config.common.scope import Scope
 from taipy.config.exceptions.exceptions import InvalidConfigurationId
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node import DataNode
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.generic import GenericDataNode
@@ -52,10 +54,12 @@ def reset_data():
 class TestGenericDataNode:
     data = list(range(10))
 
-    def test_create(self):
-        dn = GenericDataNode(
-            "foo_bar", Scope.SCENARIO, properties={"read_fct": read_fct, "write_fct": write_fct, "name": "super name"}
+    def test_create_with_both_read_fct_and_write_fct(self):
+        data_manager = _DataManagerFactory._build_manager()
+        generic_dn_config = Config.configure_generic_data_node(
+            id="foo_bar", read_fct=read_fct, write_fct=write_fct, name="super name"
         )
+        dn = data_manager._create_and_set(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert dn.storage_type() == "generic"
         assert dn.config_id == "foo_bar"
@@ -69,68 +73,76 @@ class TestGenericDataNode:
         assert dn.properties["read_fct"] == read_fct
         assert dn.properties["read_fct"] == read_fct
         assert dn.properties["write_fct"] == write_fct
         assert dn.properties["write_fct"] == write_fct
 
 
-        dn_1 = GenericDataNode(
-            "foo", Scope.SCENARIO, properties={"read_fct": read_fct, "write_fct": None, "name": "foo"}
-        )
-        assert isinstance(dn, GenericDataNode)
-        assert dn_1.storage_type() == "generic"
-        assert dn_1.config_id == "foo"
-        assert dn_1.name == "foo"
-        assert dn_1.scope == Scope.SCENARIO
-        assert dn_1.id is not None
-        assert dn_1.owner_id is None
-        assert dn_1.last_edit_date is not None
-        assert dn_1.job_ids == []
-        assert dn_1.is_ready_for_reading
-        assert dn_1.properties["read_fct"] == read_fct
-        assert dn_1.properties["write_fct"] is None
-
-        dn_2 = GenericDataNode(
-            "xyz", Scope.SCENARIO, properties={"read_fct": None, "write_fct": write_fct, "name": "xyz"}
-        )
+        with pytest.raises(InvalidConfigurationId):
+            GenericDataNode("foo bar", Scope.SCENARIO, properties={"read_fct": read_fct, "write_fct": write_fct})
+
+    def test_create_with_read_fct_and_none_write_fct(self):
+        data_manager = _DataManagerFactory._build_manager()
+        generic_dn_config = Config.configure_generic_data_node(id="foo", read_fct=read_fct, write_fct=None, name="foo")
+        dn = data_manager._create_and_set(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert isinstance(dn, GenericDataNode)
-        assert dn_2.storage_type() == "generic"
-        assert dn_2.config_id == "xyz"
-        assert dn_2.name == "xyz"
-        assert dn_2.scope == Scope.SCENARIO
-        assert dn_2.id is not None
-        assert dn_2.owner_id is None
-        assert dn_2.last_edit_date is not None
-        assert dn_2.job_ids == []
-        assert dn_2.is_ready_for_reading
-        assert dn_2.properties["read_fct"] is None
-        assert dn_2.properties["write_fct"] == write_fct
-
-        dn_3 = GenericDataNode("xyz", Scope.SCENARIO, properties={"read_fct": read_fct, "name": "xyz"})
+        assert dn.storage_type() == "generic"
+        assert dn.config_id == "foo"
+        assert dn.name == "foo"
+        assert dn.scope == Scope.SCENARIO
+        assert dn.id is not None
+        assert dn.owner_id is None
+        assert dn.last_edit_date is not None
+        assert dn.job_ids == []
+        assert dn.is_ready_for_reading
+        assert dn.properties["read_fct"] == read_fct
+        assert dn.properties["write_fct"] is None
+
+    def test_create_with_write_fct_and_none_read_fct(self):
+        data_manager = _DataManagerFactory._build_manager()
+        generic_dn_config = Config.configure_generic_data_node(id="xyz", read_fct=None, write_fct=write_fct, name="xyz")
+        dn = data_manager._create_and_set(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
-        assert dn_3.storage_type() == "generic"
-        assert dn_3.config_id == "xyz"
-        assert dn_3.name == "xyz"
-        assert dn_3.scope == Scope.SCENARIO
-        assert dn_3.id is not None
-        assert dn_3.owner_id is None
-        assert dn_3.last_edit_date is not None
-        assert dn_3.job_ids == []
-        assert dn_3.is_ready_for_reading
-        assert dn_3.properties["read_fct"] == read_fct
-        assert dn_3.properties["write_fct"] is None
-
-        dn_4 = GenericDataNode("xyz", Scope.SCENARIO, properties={"write_fct": write_fct, "name": "xyz"})
+        assert dn.storage_type() == "generic"
+        assert dn.config_id == "xyz"
+        assert dn.name == "xyz"
+        assert dn.scope == Scope.SCENARIO
+        assert dn.id is not None
+        assert dn.owner_id is None
+        assert dn.last_edit_date is not None
+        assert dn.job_ids == []
+        assert dn.is_ready_for_reading
+        assert dn.properties["read_fct"] is None
+        assert dn.properties["write_fct"] == write_fct
+
+    def test_create_with_read_fct(self):
+        data_manager = _DataManagerFactory._build_manager()
+        generic_dn_config = Config.configure_generic_data_node(id="acb", read_fct=read_fct, name="acb")
+        dn = data_manager._create_and_set(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
-        assert dn_4.storage_type() == "generic"
-        assert dn_4.config_id == "xyz"
-        assert dn_4.name == "xyz"
-        assert dn_4.scope == Scope.SCENARIO
-        assert dn_4.id is not None
-        assert dn_4.owner_id is None
-        assert dn_4.last_edit_date is not None
-        assert dn_4.job_ids == []
-        assert dn_4.is_ready_for_reading
-        assert dn_4.properties["read_fct"] is None
-        assert dn_4.properties["write_fct"] == write_fct
+        assert dn.storage_type() == "generic"
+        assert dn.config_id == "acb"
+        assert dn.name == "acb"
+        assert dn.scope == Scope.SCENARIO
+        assert dn.id is not None
+        assert dn.owner_id is None
+        assert dn.last_edit_date is not None
+        assert dn.job_ids == []
+        assert dn.is_ready_for_reading
+        assert dn.properties["read_fct"] == read_fct
+        assert dn.properties["write_fct"] is None
 
-        with pytest.raises(InvalidConfigurationId):
-            GenericDataNode("foo bar", Scope.SCENARIO, properties={"read_fct": read_fct, "write_fct": write_fct})
+    def test_create_with_write_fct(self):
+        data_manager = _DataManagerFactory._build_manager()
+        generic_dn_config = Config.configure_generic_data_node(id="mno", write_fct=write_fct, name="mno")
+        dn = data_manager._create_and_set(generic_dn_config, None, None)
+        assert isinstance(dn, GenericDataNode)
+        assert dn.storage_type() == "generic"
+        assert dn.config_id == "mno"
+        assert dn.name == "mno"
+        assert dn.scope == Scope.SCENARIO
+        assert dn.id is not None
+        assert dn.owner_id is None
+        assert dn.last_edit_date is not None
+        assert dn.job_ids == []
+        assert dn.is_ready_for_reading
+        assert dn.properties["read_fct"] is None
+        assert dn.properties["write_fct"] == write_fct
 
     def test_get_user_properties(self):
         dn_1 = GenericDataNode(

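The refactor above replaces direct data-node construction with a two-step pattern: declare a DataNodeConfig via Config.configure_*_data_node, then let the data manager materialize it. A minimal sketch of the pattern, assuming only the taipy internals this diff already imports (demo_read/demo_write are placeholder helpers, not part of the test module):

    from taipy.config import Config
    from taipy.core.data._data_manager_factory import _DataManagerFactory

    def demo_read():
        return list(range(10))

    def demo_write(data):
        ...

    # Declare the node, then let the (private) manager instantiate and register it,
    # mirroring _create_and_set(config, owner_id, parent_ids) as used in these tests.
    config = Config.configure_generic_data_node(id="demo", read_fct=demo_read, write_fct=demo_write)
    dn = _DataManagerFactory._build_manager()._create_and_set(config, None, None)
    assert dn.storage_type() == "generic"
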
+ 7 - 8
tests/core/data/test_in_memory_data_node.py

@@ -11,8 +11,10 @@
 
 import pytest
 
+from taipy.config import Config
 from taipy.config.common.scope import Scope
 from taipy.config.exceptions.exceptions import InvalidConfigurationId
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.in_memory import InMemoryDataNode
 from taipy.core.exceptions.exceptions import NoData
@@ -20,18 +22,14 @@ from taipy.core.exceptions.exceptions import NoData
 
 class TestInMemoryDataNodeEntity:
     def test_create(self):
-        dn = InMemoryDataNode(
-            "foobar_bazy",
-            Scope.SCENARIO,
-            DataNodeId("id_uio"),
-            "owner_id",
-            properties={"default_data": "In memory Data Node", "name": "my name"},
+        in_memory_dn_config = Config.configure_in_memory_data_node(
+            id="foobar_bazy", default_data="In memory Data Node", name="my name"
         )
+        dn = _DataManagerFactory._build_manager()._create_and_set(in_memory_dn_config, "owner_id", None)
         assert isinstance(dn, InMemoryDataNode)
         assert dn.storage_type() == "in_memory"
         assert dn.config_id == "foobar_bazy"
         assert dn.scope == Scope.SCENARIO
-        assert dn.id == "id_uio"
         assert dn.name == "my name"
         assert dn.owner_id == "owner_id"
         assert dn.last_edit_date is not None
@@ -39,7 +37,8 @@ class TestInMemoryDataNodeEntity:
         assert dn.is_ready_for_reading
         assert dn.read() == "In memory Data Node"
 
-        dn_2 = InMemoryDataNode("foo", Scope.SCENARIO)
+        in_memory_dn_config_2 = Config.configure_in_memory_data_node(id="foo")
+        dn_2 = _DataManagerFactory._build_manager()._create_and_set(in_memory_dn_config_2, None, None)
         assert dn_2.last_edit_date is None
         assert not dn_2.is_ready_for_reading
 

+ 33 - 13
tests/core/data/test_json_data_node.py

@@ -26,6 +26,7 @@ from taipy.config.common.scope import Scope
 from taipy.config.config import Config
 from taipy.config.exceptions.exceptions import InvalidConfigurationId
 from taipy.core.data._data_manager import _DataManager
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.json import JSONDataNode
 from taipy.core.data.operator import JoinOperator, Operator
@@ -87,21 +88,40 @@ class MyCustomDecoder(json.JSONDecoder):
 class TestJSONDataNode:
     def test_create(self):
         path = "data/node/path"
-        dn = JSONDataNode("foo_bar", Scope.SCENARIO, properties={"default_path": path, "name": "super name"})
-        assert isinstance(dn, JSONDataNode)
-        assert dn.storage_type() == "json"
-        assert dn.config_id == "foo_bar"
-        assert dn.name == "super name"
-        assert dn.scope == Scope.SCENARIO
-        assert dn.id is not None
-        assert dn.owner_id is None
-        assert dn.last_edit_date is None
-        assert dn.job_ids == []
-        assert not dn.is_ready_for_reading
-        assert dn.path == path
+        json_dn_config = Config.configure_json_data_node(id="foo_bar", default_path=path, name="super name")
+        dn_1 = _DataManagerFactory._build_manager()._create_and_set(json_dn_config, None, None)
+        assert isinstance(dn_1, JSONDataNode)
+        assert dn_1.storage_type() == "json"
+        assert dn_1.config_id == "foo_bar"
+        assert dn_1.name == "super name"
+        assert dn_1.scope == Scope.SCENARIO
+        assert dn_1.id is not None
+        assert dn_1.owner_id is None
+        assert dn_1.last_edit_date is None
+        assert dn_1.job_ids == []
+        assert not dn_1.is_ready_for_reading
+        assert dn_1.path == path
+
+        json_dn_config_2 = Config.configure_json_data_node(id="foo", default_path=path, encoding="utf-16")
+        dn_2 = _DataManagerFactory._build_manager()._create_and_set(json_dn_config_2, None, None)
+        assert isinstance(dn_2, JSONDataNode)
+        assert dn_2.storage_type() == "json"
+        assert dn_2.properties["encoding"] == "utf-16"
+        assert dn_2.encoding == "utf-16"
+
+        json_dn_config_3 = Config.configure_json_data_node(
+            id="foo", default_path=path, encoder=MyCustomEncoder, decoder=MyCustomDecoder
+        )
+        dn_3 = _DataManagerFactory._build_manager()._create_and_set(json_dn_config_3, None, None)
+        assert isinstance(dn_3, JSONDataNode)
+        assert dn_3.storage_type() == "json"
+        assert dn_3.properties["encoder"] == MyCustomEncoder
+        assert dn_3.encoder == MyCustomEncoder
+        assert dn_3.properties["decoder"] == MyCustomDecoder
+        assert dn_3.decoder == MyCustomDecoder
 
         with pytest.raises(InvalidConfigurationId):
-            dn = JSONDataNode(
+            _ = JSONDataNode(
                 "foo bar", Scope.SCENARIO, properties={"default_path": path, "has_header": False, "name": "super name"}
             )
 

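The new dn_2/dn_3 assertions above exercise the encoding, encoder, and decoder properties of a JSON data node. MyCustomEncoder/MyCustomDecoder live in the test module; any json.JSONEncoder/json.JSONDecoder subclass pair should satisfy the same contract. A self-contained sketch with a hypothetical Vector type:

    import json

    class Vector:
        def __init__(self, x, y):
            self.x, self.y = x, y

    class VectorEncoder(json.JSONEncoder):
        def default(self, o):
            # Serialize Vector as a tagged dict; defer everything else to the base class.
            if isinstance(o, Vector):
                return {"__vector__": True, "x": o.x, "y": o.y}
            return super().default(o)

    class VectorDecoder(json.JSONDecoder):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, object_hook=self._hook, **kwargs)

        @staticmethod
        def _hook(d):
            # Rebuild Vector from the tagged dict; pass other dicts through unchanged.
            return Vector(d["x"], d["y"]) if d.get("__vector__") else d

    payload = json.dumps(Vector(1, 2), cls=VectorEncoder)
    restored = json.loads(payload, cls=VectorDecoder)
    assert isinstance(restored, Vector)
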
+ 4 - 5
tests/core/data/test_mongo_data_node.py

@@ -20,9 +20,11 @@ import pytest
 from bson import ObjectId
 from bson.errors import InvalidDocument
 
+from taipy.config import Config
 from taipy.config.common.scope import Scope
 from taipy.core import MongoDefaultDocument
 from taipy.core.common._mongo_connector import _connect_mongodb
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.mongo import MongoCollectionDataNode
 from taipy.core.data.operator import JoinOperator, Operator
@@ -76,11 +78,8 @@ class TestMongoCollectionDataNode:
 
     @pytest.mark.parametrize("properties", __properties)
     def test_create(self, properties):
-        mongo_dn = MongoCollectionDataNode(
-            "foo_bar",
-            Scope.SCENARIO,
-            properties=properties,
-        )
+        mongo_dn_config = Config.configure_mongo_collection_data_node("foo_bar", **properties)
+        mongo_dn = _DataManagerFactory._build_manager()._create_and_set(mongo_dn_config, None, None)
         assert isinstance(mongo_dn, MongoCollectionDataNode)
         assert mongo_dn.storage_type() == "mongo_collection"
         assert mongo_dn.config_id == "foo_bar"

+ 11 - 2
tests/core/data/test_parquet_data_node.py

@@ -23,6 +23,7 @@ from taipy.config.common.scope import Scope
 from taipy.config.config import Config
 from taipy.config.exceptions.exceptions import InvalidConfigurationId
 from taipy.core.data._data_manager import _DataManager
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.parquet import ParquetDataNode
 from taipy.core.exceptions.exceptions import (
@@ -65,9 +66,10 @@ class TestParquetDataNode:
     def test_create(self):
         path = "data/node/path"
         compression = "snappy"
-        dn = ParquetDataNode(
-            "foo_bar", Scope.SCENARIO, properties={"path": path, "compression": compression, "name": "super name"}
+        parquet_dn_config = Config.configure_parquet_data_node(
+            id="foo_bar", default_path=path, compression=compression, name="super name"
         )
+        dn = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config, None, None)
         assert isinstance(dn, ParquetDataNode)
         assert dn.storage_type() == "parquet"
         assert dn.config_id == "foo_bar"
@@ -83,6 +85,13 @@ class TestParquetDataNode:
         assert dn.compression == "snappy"
         assert dn.engine == "pyarrow"
 
+        parquet_dn_config_1 = Config.configure_parquet_data_node(
+            id="bar", default_path=path, compression=compression, exposed_type=MyCustomObject
+        )
+        dn_1 = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config_1, None, None)
+        assert isinstance(dn_1, ParquetDataNode)
+        assert dn_1.exposed_type == MyCustomObject
+
         with pytest.raises(InvalidConfigurationId):
             dn = ParquetDataNode("foo bar", Scope.SCENARIO, properties={"path": path, "name": "super name"})
 

+ 11 - 2
tests/core/data/test_pickle_data_node.py

@@ -21,6 +21,7 @@ from taipy.config.common.scope import Scope
 from taipy.config.config import Config
 from taipy.config.exceptions.exceptions import InvalidConfigurationId
 from taipy.core.data._data_manager import _DataManager
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.pickle import PickleDataNode
 from taipy.core.exceptions.exceptions import NoData
 
@@ -42,9 +43,17 @@ class TestPickleDataNodeEntity:
         for f in glob.glob("*.p"):
             os.remove(f)
 
+    def test_create_with_manager(self, pickle_file_path):
+        parquet_dn_config = Config.configure_pickle_data_node(id="baz", default_path=pickle_file_path)
+        parquet_dn = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config, None, None)
+        assert isinstance(parquet_dn, PickleDataNode)
+
     def test_create(self):
-        dn = PickleDataNode("foobar_bazxyxea", Scope.SCENARIO, properties={"default_data": "Data"})
-        assert os.path.isfile(os.path.join(Config.core.storage_folder.strip("/"), "pickles", dn.id + ".p"))
+        pickle_dn_config = Config.configure_pickle_data_node(
+            id="foobar_bazxyxea", default_path="Data", default_data="Data"
+        )
+        dn = _DataManagerFactory._build_manager()._create_and_set(pickle_dn_config, None, None)
+
         assert isinstance(dn, PickleDataNode)
         assert dn.storage_type() == "pickle"
         assert dn.config_id == "foobar_bazxyxea"

+ 2 - 2
tests/core/data/test_read_excel_data_node.py

@@ -300,7 +300,7 @@ def test_read_multi_sheet_with_header_pandas():
     assert isinstance(data_pandas, Dict)
     assert len(data_pandas) == 2
     assert all(
-        len(data_pandas[sheet_name] == 5) and isinstance(data_pandas[sheet_name], pd.DataFrame)
+        len(data_pandas[sheet_name]) == 5 and isinstance(data_pandas[sheet_name], pd.DataFrame)
         for sheet_name in sheet_names
     )
     assert list(data_pandas.keys()) == sheet_names
@@ -331,7 +331,7 @@ def test_read_multi_sheet_with_header_numpy():
     assert isinstance(data_numpy, Dict)
     assert len(data_numpy) == 2
     assert all(
-        len(data_numpy[sheet_name] == 5) and isinstance(data_numpy[sheet_name], np.ndarray)
+        len(data_numpy[sheet_name]) == 5 and isinstance(data_numpy[sheet_name], np.ndarray)
         for sheet_name in sheet_names
     )
     assert list(data_numpy.keys()) == sheet_names

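The two one-character fixes above correct an operator-precedence bug: with the == 5 inside the len() call, pandas and numpy compare elementwise, and len() is taken of the resulting boolean container, which is always the row count and therefore always truthy, so the old assertions could never fail. A minimal pandas illustration:

    import pandas as pd

    df = pd.DataFrame({"a": [1, 2, 3]})

    # Buggy form: df == 5 is an elementwise comparison; len() of the boolean
    # frame is just the row count (3), so this assertion always passes.
    assert len(df == 5)

    # Fixed form: compare the length itself against the expected row count.
    assert len(df) == 3
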
+ 111 - 13
tests/core/data/test_read_sql_table_data_node.py

@@ -17,6 +17,7 @@ import pandas as pd
 import pytest
 
 from taipy.config.common.scope import Scope
+from taipy.core.data.operator import JoinOperator, Operator
 from taipy.core.data.sql_table import SQLTableDataNode
 
 
@@ -29,7 +30,7 @@ class MyCustomObject:
 
 
 class TestReadSQLTableDataNode:
-    __pandas_properties = [
+    __sql_properties = [
         {
             "db_name": "taipy",
             "db_engine": "sqlite",
@@ -42,7 +43,7 @@ class TestReadSQLTableDataNode:
     ]
 
     if util.find_spec("pyodbc"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -56,7 +57,7 @@ class TestReadSQLTableDataNode:
         )
 
     if util.find_spec("pymysql"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -70,7 +71,7 @@ class TestReadSQLTableDataNode:
         )
 
     if util.find_spec("psycopg2"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -87,9 +88,9 @@ class TestReadSQLTableDataNode:
     def mock_read_value():
         return {"foo": ["baz", "quux", "corge"], "bar": ["quux", "quuz", None]}
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_read_pandas(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    @pytest.mark.parametrize("sql_properties", __sql_properties)
+    def test_read_pandas(self, sql_properties):
+        custom_properties = sql_properties.copy()
 
         sql_data_node_as_pandas = SQLTableDataNode(
             "foo",
@@ -105,9 +106,106 @@ class TestReadSQLTableDataNode:
             assert isinstance(pandas_data, pd.DataFrame)
             assert pandas_data.equals(pd.DataFrame(self.mock_read_value()))
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_read_numpy(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    def test_build_connection_string(self):
+        sql_properties = {
+            "db_username": "sa",
+            "db_password": "Passw0rd",
+            "db_name": "taipy",
+            "db_engine": "mssql",
+            "table_name": "example",
+            "db_driver": "default server",
+            "db_extra_args": {
+                "TrustServerCertificate": "yes",
+                "other": "value",
+            },
+        }
+        custom_properties = sql_properties.copy()
+        mssql_sql_data_node = SQLTableDataNode(
+            "foo",
+            Scope.SCENARIO,
+            properties=custom_properties,
+        )
+        assert (
+            mssql_sql_data_node._conn_string()
+            == "mssql+pyodbc://sa:Passw0rd@localhost:1433/taipy?TrustServerCertificate=yes&other=value&driver=default+server"
+        )
+
+        custom_properties["db_engine"] = "mysql"
+        mysql_sql_data_node = SQLTableDataNode(
+            "foo",
+            Scope.SCENARIO,
+            properties=custom_properties,
+        )
+        assert (
+            mysql_sql_data_node._conn_string()
+            == "mysql+pymysql://sa:Passw0rd@localhost:1433/taipy?TrustServerCertificate=yes&other=value&driver=default+server"
+        )
+
+        custom_properties["db_engine"] = "postgresql"
+        postgresql_sql_data_node = SQLTableDataNode(
+            "foo",
+            Scope.SCENARIO,
+            properties=custom_properties,
+        )
+        assert (
+            postgresql_sql_data_node._conn_string()
+            == "postgresql+psycopg2://sa:Passw0rd@localhost:1433/taipy?TrustServerCertificate=yes&other=value&driver=default+server"
+        )
+
+        custom_properties["db_engine"] = "sqlite"
+        sqlite_sql_data_node = SQLTableDataNode(
+            "foo",
+            Scope.SCENARIO,
+            properties=custom_properties,
+        )
+        assert sqlite_sql_data_node._conn_string() == "sqlite:///taipy.db"
+
+    @pytest.mark.parametrize("sql_properties", __sql_properties)
+    def test_get_read_query(self, sql_properties):
+        custom_properties = sql_properties.copy()
+
+        sql_data_node = SQLTableDataNode(
+            "foo",
+            Scope.SCENARIO,
+            properties=custom_properties,
+        )
+
+        assert sql_data_node._get_read_query(("key", 1, Operator.EQUAL)) == "SELECT * FROM example WHERE key = '1'"
+        assert sql_data_node._get_read_query(("key", 1, Operator.NOT_EQUAL)) == "SELECT * FROM example WHERE key <> '1'"
+        assert (
+            sql_data_node._get_read_query(("key", 1, Operator.GREATER_THAN)) == "SELECT * FROM example WHERE key > '1'"
+        )
+        assert (
+            sql_data_node._get_read_query(("key", 1, Operator.GREATER_OR_EQUAL))
+            == "SELECT * FROM example WHERE key >= '1'"
+        )
+        assert sql_data_node._get_read_query(("key", 1, Operator.LESS_THAN)) == "SELECT * FROM example WHERE key < '1'"
+        assert (
+            sql_data_node._get_read_query(("key", 1, Operator.LESS_OR_EQUAL))
+            == "SELECT * FROM example WHERE key <= '1'"
+        )
+
+        with pytest.raises(NotImplementedError):
+            sql_data_node._get_read_query(
+                [("key", 1, Operator.EQUAL), ("key2", 2, Operator.GREATER_THAN)], "SOME JoinOperator"
+            )
+
+        assert (
+            sql_data_node._get_read_query(
+                [("key", 1, Operator.EQUAL), ("key2", 2, Operator.GREATER_THAN)], JoinOperator.AND
+            )
+            == "SELECT * FROM example WHERE key = '1' AND key2 > '2'"
+        )
+        assert (
+            sql_data_node._get_read_query(
+                [("key", 1, Operator.EQUAL), ("key2", 2, Operator.GREATER_THAN)], JoinOperator.OR
+            )
+            == "SELECT * FROM example WHERE key = '1' OR key2 > '2'"
+        )
+
+    @pytest.mark.parametrize("sql_properties", __sql_properties)
+    def test_read_numpy(self, sql_properties):
+        custom_properties = sql_properties.copy()
         custom_properties["exposed_type"] = "numpy"
 
         sql_data_node_as_pandas = SQLTableDataNode(
@@ -124,9 +222,9 @@ class TestReadSQLTableDataNode:
             assert isinstance(numpy_data, np.ndarray)
             assert np.array_equal(numpy_data, pd.DataFrame(self.mock_read_value()).to_numpy())
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_read_custom_exposed_type(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    @pytest.mark.parametrize("sql_properties", __sql_properties)
+    def test_read_custom_exposed_type(self, sql_properties):
+        custom_properties = sql_properties.copy()
 
         custom_properties.pop("db_extra_args")
         custom_properties["exposed_type"] = MyCustomObject

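The new test_get_read_query pins down how a (column, value, Operator) filter tuple is rendered into a WHERE clause and how several filters are joined with JoinOperator.AND/OR. A standalone sketch of that mapping, where build_query and the local Operator enum are illustrative stand-ins rather than taipy's private API:

    from enum import Enum

    class Operator(Enum):
        EQUAL = "="
        NOT_EQUAL = "<>"
        GREATER_THAN = ">"
        GREATER_OR_EQUAL = ">="
        LESS_THAN = "<"
        LESS_OR_EQUAL = "<="

    def build_query(table, filters, join="AND"):
        # ("key", 1, Operator.EQUAL) renders as "key = '1'"; clauses are joined by AND/OR.
        clauses = [f"{col} {op.value} '{val}'" for col, val, op in filters]
        return f"SELECT * FROM {table} WHERE " + f" {join} ".join(clauses)

    assert build_query("example", [("key", 1, Operator.EQUAL)]) == "SELECT * FROM example WHERE key = '1'"
    assert (
        build_query("example", [("key", 1, Operator.EQUAL), ("key2", 2, Operator.GREATER_THAN)], "OR")
        == "SELECT * FROM example WHERE key = '1' OR key2 > '2'"
    )
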
+ 66 - 30
tests/core/data/test_sql_data_node.py

@@ -17,11 +17,13 @@ import pandas as pd
 import pytest
 from pandas.testing import assert_frame_equal
 
+from taipy.config import Config
 from taipy.config.common.scope import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.operator import JoinOperator, Operator
 from taipy.core.data.sql import SQLDataNode
-from taipy.core.exceptions.exceptions import MissingAppendQueryBuilder, MissingRequiredProperty
+from taipy.core.exceptions.exceptions import MissingAppendQueryBuilder, MissingRequiredProperty, UnknownDatabaseEngine
 
 
 class MyCustomObject:
@@ -36,6 +38,7 @@ def my_write_query_builder_with_pandas(data: pd.DataFrame):
     insert_data = data.to_dict("records")
     return ["DELETE FROM example", ("INSERT INTO example VALUES (:foo, :bar)", insert_data)]
 
+
 def my_append_query_builder_with_pandas(data: pd.DataFrame):
     insert_data = data.to_dict("records")
     return [("INSERT INTO example VALUES (:foo, :bar)", insert_data)]
@@ -46,7 +49,7 @@ def single_write_query_builder(data):
 
 
 class TestSQLDataNode:
-    __pandas_properties = [
+    __sql_properties = [
         {
             "db_name": "taipy.sqlite3",
             "db_engine": "sqlite",
@@ -60,7 +63,7 @@ class TestSQLDataNode:
     ]
 
     if util.find_spec("pyodbc"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -74,9 +77,8 @@ class TestSQLDataNode:
             },
         )
 
-
     if util.find_spec("pymysql"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -90,9 +92,8 @@ class TestSQLDataNode:
             },
         )
 
-
     if util.find_spec("psycopg2"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -106,14 +107,10 @@ class TestSQLDataNode:
             },
         )
 
-
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_create(self, pandas_properties):
-        dn = SQLDataNode(
-            "foo_bar",
-            Scope.SCENARIO,
-            properties=pandas_properties,
-        )
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_create(self, properties):
+        sql_dn_config = Config.configure_sql_data_node(id="foo_bar", **properties)
+        dn = _DataManagerFactory._build_manager()._create_and_set(sql_dn_config, None, None)
         assert isinstance(dn, SQLDataNode)
         assert dn.storage_type() == "sql"
         assert dn.config_id == "foo_bar"
@@ -126,8 +123,18 @@ class TestSQLDataNode:
         assert dn.read_query == "SELECT * FROM example"
         assert dn.write_query_builder == my_write_query_builder_with_pandas
 
+        sql_dn_config_1 = Config.configure_sql_data_node(
+            id="foo",
+            **properties,
+            append_query_builder=my_append_query_builder_with_pandas,
+            exposed_type=MyCustomObject,
+        )
+        dn_1 = _DataManagerFactory._build_manager()._create_and_set(sql_dn_config_1, None, None)
+        assert isinstance(dn_1, SQLDataNode)
+        assert dn_1.exposed_type == MyCustomObject
+        assert dn_1.append_query_builder == my_append_query_builder_with_pandas
 
-    @pytest.mark.parametrize("properties", __pandas_properties)
+    @pytest.mark.parametrize("properties", __sql_properties)
     def test_get_user_properties(self, properties):
         custom_properties = properties.copy()
         custom_properties["foo"] = "bar"
@@ -142,24 +149,54 @@ class TestSQLDataNode:
         "properties",
         [
             {},
-            {"db_username": "foo"},
-            {"db_username": "foo", "db_password": "foo"},
-            {"db_username": "foo", "db_password": "foo", "db_name": "foo"},
-            {"engine": "sqlite"},
-            {"engine": "mssql", "db_name": "foo"},
-            {"engine": "mysql", "db_username": "foo"},
-            {"engine": "postgresql", "db_username": "foo", "db_password": "foo"},
+            {"read_query": "ready query"},
+            {"read_query": "ready query", "write_query_builder": "write query"},
+            {"read_query": "ready query", "write_query_builder": "write query", "db_username": "foo"},
+            {
+                "read_query": "ready query",
+                "write_query_builder": "write query",
+                "db_username": "foo",
+                "db_password": "foo",
+            },
+            {
+                "read_query": "ready query",
+                "write_query_builder": "write query",
+                "db_username": "foo",
+                "db_password": "foo",
+                "db_name": "foo",
+            },
+            {"read_query": "ready query", "write_query_builder": "write query", "db_engine": "some engine"},
+            {"read_query": "ready query", "write_query_builder": "write query", "db_engine": "sqlite"},
+            {"read_query": "ready query", "write_query_builder": "write query", "db_engine": "mssql", "db_name": "foo"},
+            {
+                "read_query": "ready query",
+                "write_query_builder": "write query",
+                "db_engine": "mysql",
+                "db_username": "foo",
+            },
+            {
+                "read_query": "ready query",
+                "write_query_builder": "write query",
+                "db_engine": "postgresql",
+                "db_username": "foo",
+                "db_password": "foo",
+            },
         ],
         ],
     )
     def test_create_with_missing_parameters(self, properties):
         with pytest.raises(MissingRequiredProperty):
             SQLDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"))
-            SQLDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
-
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_write_query_builder(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+        engine = properties.get("db_engine")
+        if engine is not None and engine not in ["sqlite", "mssql", "mysql", "postgresql"]:
+            with pytest.raises(UnknownDatabaseEngine):
+                SQLDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
+        else:
+            with pytest.raises(MissingRequiredProperty):
+                SQLDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
+
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_write_query_builder(self, properties):
+        custom_properties = properties.copy()
         custom_properties.pop("db_extra_args")
         custom_properties.pop("db_extra_args")
         dn = SQLDataNode("foo_bar", Scope.SCENARIO, properties=custom_properties)
         with patch("sqlalchemy.engine.Engine.connect") as engine_mock:
             assert len(engine_mock.mock_calls[4].args) == 1
             assert len(engine_mock.mock_calls[4].args) == 1
             assert engine_mock.mock_calls[4].args[0].text == "DELETE FROM example"
 
     @pytest.mark.parametrize(
     @pytest.mark.parametrize(
         "tmp_sqlite_path",
         [
+ 28 - 22
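As my_write_query_builder_with_pandas above illustrates, a write (or append) query builder receives the outgoing pandas DataFrame and returns a list of statements, each either a bare SQL string or a (query, parameters) tuple whose parameters are executed per record. A hypothetical builder following that contract:

    import pandas as pd

    def upsert_query_builder(data: pd.DataFrame):
        # Full rewrite: clear the table, then insert every record with bound parameters.
        records = data.to_dict("records")
        return [
            "DELETE FROM example",
            ("INSERT INTO example VALUES (:foo, :bar)", records),
        ]

    df = pd.DataFrame({"foo": [1, 2], "bar": ["a", "b"]})
    statements = upsert_query_builder(df)
    assert statements[0] == "DELETE FROM example"
    assert statements[1][1] == [{"foo": 1, "bar": "a"}, {"foo": 2, "bar": "b"}]
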
tests/core/data/test_sql_table_data_node.py

@@ -14,7 +14,9 @@ from unittest.mock import patch
 
 
 import pytest
 import pytest
 
 
+from taipy.config import Config
 from taipy.config.common.scope import Scope
 from taipy.config.common.scope import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.data_node_id import DataNodeId
 from taipy.core.data.sql_table import SQLTableDataNode
 from taipy.core.data.sql_table import SQLTableDataNode
 from taipy.core.exceptions.exceptions import InvalidExposedType, MissingRequiredProperty
 from taipy.core.exceptions.exceptions import InvalidExposedType, MissingRequiredProperty
@@ -29,7 +31,7 @@ class MyCustomObject:
 
 
 
 class TestSQLTableDataNode:
+    __sql_properties = [
         {
         {
             "db_name": "taipy",
             "db_engine": "sqlite",
     ]
     ]
 
     if util.find_spec("pyodbc"):
+        __sql_properties.append(
             {
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
         )
         )
 
     if util.find_spec("pymysql"):
+        __sql_properties.append(
             {
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
         )
         )
 
     if util.find_spec("psycopg2"):
+        __sql_properties.append(
             {
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -83,13 +85,10 @@ class TestSQLTableDataNode:
             },
         )
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_create(self, pandas_properties):
-        dn = SQLTableDataNode(
-            "foo_bar",
-            Scope.SCENARIO,
-            properties=pandas_properties,
-        )
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_create(self, properties):
+        sql_table_dn_config = Config.configure_sql_table_data_node("foo_bar", **properties)
+        dn = _DataManagerFactory._build_manager()._create_and_set(sql_table_dn_config, None, None)
         assert isinstance(dn, SQLTableDataNode)
         assert dn.storage_type() == "sql_table"
         assert dn.config_id == "foo_bar"
@@ -102,7 +101,14 @@ class TestSQLTableDataNode:
         assert dn.table_name == "example"
         assert dn._get_base_read_query() == "SELECT * FROM example"
 
-    @pytest.mark.parametrize("properties", __pandas_properties)
+        sql_table_dn_config_1 = Config.configure_sql_table_data_node(
+            "foo_bar", **properties, exposed_type=MyCustomObject
+        )
+        dn_1 = _DataManagerFactory._build_manager()._create_and_set(sql_table_dn_config_1, None, None)
+        assert isinstance(dn_1, SQLTableDataNode)
+        assert dn_1.exposed_type == MyCustomObject
+
+    @pytest.mark.parametrize("properties", __sql_properties)
     def test_get_user_properties(self, properties):
         custom_properties = properties.copy()
         custom_properties["foo"] = "bar"
@@ -129,28 +135,28 @@ class TestSQLTableDataNode:
             SQLTableDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
 
     @patch("taipy.core.data.sql_table.SQLTableDataNode._read_as_pandas_dataframe", return_value="pandas")
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_modin_deprecated_in_favor_of_pandas(self, mock_read_as_pandas_dataframe, pandas_properties):
-        pandas_properties["exposed_type"] = "modin"
-        sql_data_node_as_modin = SQLTableDataNode("foo", Scope.SCENARIO, properties=pandas_properties)
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_modin_deprecated_in_favor_of_pandas(self, mock_read_as_pandas_dataframe, properties):
+        properties["exposed_type"] = "modin"
+        sql_data_node_as_modin = SQLTableDataNode("foo", Scope.SCENARIO, properties=properties)
         assert sql_data_node_as_modin.properties["exposed_type"] == "pandas"
         assert sql_data_node_as_modin.read() == "pandas"
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_raise_error_invalid_exposed_type(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_raise_error_invalid_exposed_type(self, properties):
+        custom_properties = properties.copy()
         custom_properties.pop("db_extra_args")
         custom_properties["exposed_type"] = "foo"
         with pytest.raises(InvalidExposedType):
             SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
+    @pytest.mark.parametrize("properties", __sql_properties)
     @patch("pandas.read_sql_query")
-    def test_engine_cache(self, _, pandas_properties):
+    def test_engine_cache(self, _, properties):
         dn = SQLTableDataNode(
             "foo",
             Scope.SCENARIO,
-            properties=pandas_properties,
+            properties=properties,
         )
 
         assert dn._engine is None

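In the next file, the patch targets change from _SQLTableDataNode__insert_dataframe to _insert_dataframe. This follows from Python name mangling: a double leading underscore stores the attribute under its mangled name, so callers such as unittest.mock.patch must spell that name out, while a single leading underscore is not mangled. A small illustration with a hypothetical Demo class:

    from unittest.mock import patch

    class Demo:
        def __hidden(self):   # stored as _Demo__hidden (name-mangled)
            return "real"

        def _insert(self):    # single underscore: no mangling
            return "real"

    # Double underscore: the patch target must use the mangled name.
    with patch.object(Demo, "_Demo__hidden", return_value="mocked"):
        assert Demo()._Demo__hidden() == "mocked"

    # Single underscore: the target is simply the attribute name.
    with patch.object(Demo, "_insert", return_value="mocked"):
        assert Demo()._insert() == "mocked"
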
+ 16 - 16
tests/core/data/test_write_sql_table_data_node.py

@@ -28,7 +28,7 @@ class MyCustomObject:
 
 
 class TestWriteSQLTableDataNode:
-    __pandas_properties = [
+    __sql_properties = [
         {
             "db_name": "taipy",
             "db_engine": "sqlite",
@@ -41,7 +41,7 @@ class TestWriteSQLTableDataNode:
     ]
 
     if util.find_spec("pyodbc"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -55,7 +55,7 @@ class TestWriteSQLTableDataNode:
         )
 
     if util.find_spec("pymysql"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -69,7 +69,7 @@ class TestWriteSQLTableDataNode:
         )
 
     if util.find_spec("psycopg2"):
-        __pandas_properties.append(
+        __sql_properties.append(
             {
                 "db_username": "sa",
                 "db_password": "Passw0rd",
@@ -82,9 +82,9 @@ class TestWriteSQLTableDataNode:
             },
         )
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_write_pandas(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_write_pandas(self, properties):
+        custom_properties = properties.copy()
         custom_properties.pop("db_extra_args")
         sql_table_dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
 
@@ -94,7 +94,7 @@ class TestWriteSQLTableDataNode:
             cursor_mock = engine_mock.return_value.__enter__.return_value
             cursor_mock.execute.side_effect = None
 
-            with patch("taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe") as mck:
+            with patch("taipy.core.data.sql_table.SQLTableDataNode._insert_dataframe") as mck:
                 df = pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])
                 sql_table_dn.write(df)
                 assert mck.call_count == 1
@@ -112,9 +112,9 @@ class TestWriteSQLTableDataNode:
                 sql_table_dn.write(None)
                 assert mck.call_count == 5
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_write_numpy(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_write_numpy(self, properties):
+        custom_properties = properties.copy()
         custom_properties["exposed_type"] = "numpy"
         custom_properties.pop("db_extra_args")
         sql_table_dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
@@ -125,7 +125,7 @@ class TestWriteSQLTableDataNode:
             cursor_mock = engine_mock.return_value.__enter__.return_value
             cursor_mock.execute.side_effect = None
 
-            with patch("taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe") as mck:
+            with patch("taipy.core.data.sql_table.SQLTableDataNode._insert_dataframe") as mck:
                 arr = np.array([[1], [2], [3], [4], [5]])
                 sql_table_dn.write(arr)
                 assert mck.call_count == 1
@@ -139,9 +139,9 @@ class TestWriteSQLTableDataNode:
                 sql_table_dn.write(None)
                 assert mck.call_count == 4
 
-    @pytest.mark.parametrize("pandas_properties", __pandas_properties)
-    def test_write_custom_exposed_type(self, pandas_properties):
-        custom_properties = pandas_properties.copy()
+    @pytest.mark.parametrize("properties", __sql_properties)
+    def test_write_custom_exposed_type(self, properties):
+        custom_properties = properties.copy()
         custom_properties["exposed_type"] = MyCustomObject
         custom_properties.pop("db_extra_args")
         sql_table_dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
@@ -152,7 +152,7 @@ class TestWriteSQLTableDataNode:
             cursor_mock = engine_mock.return_value.__enter__.return_value
             cursor_mock.execute.side_effect = None
 
-            with patch("taipy.core.data.sql_table.SQLTableDataNode._SQLTableDataNode__insert_dataframe") as mck:
+            with patch("taipy.core.data.sql_table.SQLTableDataNode._insert_dataframe") as mck:
                 custom_data = [
                     MyCustomObject(1, 2),
                     MyCustomObject(3, 4),