Browse source

refactor: remove _Manager._set() API and add _Manager._update()

trgiangdo 1 month ago
parent
commit
be56a7398f
78 changed files with 926 additions and 665 deletions
  1. taipy/core/_entity/_entity.py (+1 -1)
  2. taipy/core/_entity/_reload.py (+5 -5)
  3. taipy/core/_manager/_manager.py (+7 -4)
  4. taipy/core/_orchestrator/_dispatcher/_job_dispatcher.py (+1 -1)
  5. taipy/core/_version/_cli/_version_cli.py (+5 -5)
  6. taipy/core/_version/_version_manager.py (+2 -2)
  7. taipy/core/cycle/_cycle_manager.py (+1 -1)
  8. taipy/core/data/_data_manager.py (+3 -3)
  9. taipy/core/data/_file_datanode_mixin.py (+8 -6)
  10. taipy/core/data/data_node.py (+2 -2)
  11. taipy/core/data/parquet.py (+0 -1)
  12. taipy/core/exceptions/exceptions.py (+24 -1)
  13. taipy/core/job/_job_manager.py (+1 -1)
  14. taipy/core/scenario/_scenario_duplicator.py (+27 -22)
  15. taipy/core/scenario/_scenario_manager.py (+6 -6)
  16. taipy/core/sequence/_sequence_manager.py (+5 -5)
  17. taipy/core/submission/_submission_manager.py (+3 -3)
  18. taipy/core/taipy.py (+18 -12)
  19. taipy/core/task/_task_manager.py (+12 -11)
  20. taipy/rest/api/resources/cycle.py (+1 -1)
  21. taipy/rest/api/resources/job.py (+1 -1)
  22. tests/core/_manager/test_manager.py (+9 -9)
  23. tests/core/_orchestrator/_dispatcher/test_development_job_dispatcher.py (+4 -1)
  24. tests/core/_orchestrator/_dispatcher/test_dispatcher__execute_job.py (+6 -6)
  25. tests/core/_orchestrator/_dispatcher/test_dispatcher__update_job_status.py (+8 -7)
  26. tests/core/_orchestrator/_dispatcher/test_standalone_job_dispatcher.py (+6 -5)
  27. tests/core/_orchestrator/test_orchestrator__cancel_jobs.py (+1 -1)
  28. tests/core/_orchestrator/test_orchestrator__on_status_change.py (+2 -2)
  29. tests/core/_orchestrator/test_orchestrator__submit.py (+11 -11)
  30. tests/core/config/test_scenario_config.py (+6 -4)
  31. tests/core/cycle/test_cycle.py (+7 -3)
  32. tests/core/cycle/test_cycle_manager.py (+4 -4)
  33. tests/core/cycle/test_cycle_repositories.py (+1 -1)
  34. tests/core/data/test_aws_s3_data_node.py (+1 -1)
  35. tests/core/data/test_csv_data_node.py (+9 -2)
  36. tests/core/data/test_data_manager.py (+42 -42)
  37. tests/core/data/test_data_node.py (+36 -25)
  38. tests/core/data/test_excel_data_node.py (+13 -5)
  39. tests/core/data/test_file_datanode_mixin.py (+4 -4)
  40. tests/core/data/test_filter_csv_data_node.py (+3 -0)
  41. tests/core/data/test_filter_data_node.py (+2 -0)
  42. tests/core/data/test_filter_excel_data_node.py (+7 -0)
  43. tests/core/data/test_filter_parquet_data_node.py (+3 -0)
  44. tests/core/data/test_filter_sql_table_data_node.py (+3 -0)
  45. tests/core/data/test_generic_data_node.py (+11 -5)
  46. tests/core/data/test_in_memory_data_node.py (+4 -2)
  47. tests/core/data/test_json_data_node.py (+19 -3)
  48. tests/core/data/test_mongo_data_node.py (+7 -1)
  49. tests/core/data/test_parquet_data_node.py (+10 -4)
  50. tests/core/data/test_pickle_data_node.py (+12 -2)
  51. tests/core/data/test_read_parquet_data_node.py (+2 -0)
  52. tests/core/data/test_sql_data_node.py (+7 -2)
  53. tests/core/data/test_sql_table_data_node.py (+3 -2)
  54. tests/core/data/test_write_csv_data_node.py (+11 -0)
  55. tests/core/data/test_write_multiple_sheet_excel_data_node.py (+16 -0)
  56. tests/core/data/test_write_parquet_data_node.py (+11 -0)
  57. tests/core/data/test_write_single_sheet_excel_data_node.py (+18 -0)
  58. tests/core/data/test_write_sql_table_data_node.py (+5 -0)
  59. tests/core/job/test_job.py (+18 -11)
  60. tests/core/job/test_job_manager.py (+13 -13)
  61. tests/core/scenario/test_scenario.py (+69 -61)
  62. tests/core/scenario/test_scenario_manager.py (+28 -33)
  63. tests/core/sequence/test_sequence.py (+11 -11)
  64. tests/core/sequence/test_sequence_manager.py (+43 -39)
  65. tests/core/submission/test_submission.py (+18 -17)
  66. tests/core/submission/test_submission_manager.py (+12 -9)
  67. tests/core/task/test_task.py (+8 -8)
  68. tests/core/task/test_task_manager.py (+8 -7)
  69. tests/core/task/test_task_model.py (+3 -3)
  70. tests/core/test_taipy.py (+31 -31)
  71. tests/gui_core/test_context_is_editable.py (+5 -7)
  72. tests/gui_core/test_context_is_promotable.py (+4 -4)
  73. tests/gui_core/test_context_is_readable.py (+128 -92)
  74. tests/gui_core/test_context_on_file_action.py (+26 -30)
  75. tests/gui_core/test_context_tabular_data_edit.py (+18 -19)
  76. tests/gui_core/test_context_update_data.py (+19 -17)
  77. tests/rest/conftest.py (+5 -3)
  78. tests/rest/test_sequence.py (+2 -2)

+ 1 - 1
taipy/core/_entity/_entity.py

@@ -33,7 +33,7 @@ class _Entity:
             for to_delete_key in self._properties._pending_deletions:
                 self._properties.data.pop(to_delete_key, None)
             self._properties.data.update(self._properties._pending_changes)
-        _Reloader()._get_manager(self._MANAGER_NAME)._set(self)
+        _Reloader()._get_manager(self._MANAGER_NAME)._update(self)
 
         for event in self._in_context_attributes_changed_collector:
             Notifier.publish(event)

+ 5 - 5
taipy/core/_entity/_reload.py

@@ -100,9 +100,9 @@ def _self_reload(manager: str):
 
 
 def _self_setter(manager):
-    def __set_entity(fct):
+    def __update_entity(fct):
         @functools.wraps(fct)
-        def _do_set_entity(self, *args, **kwargs):
+        def _do_update_entity(self, *args, **kwargs):
             fct(self, *args, **kwargs)
             value = args[0] if len(args) == 1 else args
             event = _make_event(
@@ -114,11 +114,11 @@ def _self_setter(manager):
             if not self._is_in_context:
                 entity = _Reloader()._reload(manager, self)
                 fct(entity, *args, **kwargs)
-                _Reloader._get_manager(manager)._set(entity)
+                _Reloader._get_manager(manager)._update(entity)
                 Notifier.publish(event)
             else:
                 self._in_context_attributes_changed_collector.append(event)
 
-        return _do_set_entity
+        return _do_update_entity
 
-    return __set_entity
+    return __update_entity

+ 7 - 4
taipy/core/_manager/_manager.py

@@ -15,7 +15,7 @@ from taipy.common.logger._taipy_logger import _TaipyLogger
 
 from .._entity._entity_ids import _EntityIds
 from .._repository._abstract_repository import _AbstractRepository
-from ..exceptions.exceptions import ModelNotFound
+from ..exceptions.exceptions import ModelNotFound, NonExistingEntity
 from ..notification import Event, EventOperation, Notifier
 from ..reason import EntityDoesNotExist, ReasonCollection
 
@@ -90,11 +90,14 @@ class _Manager(Generic[EntityType]):
             )
 
     @classmethod
-    def _set(cls, entity: EntityType):
+    def _update(cls, entity: EntityType):
         """
-        Save or update an entity.
+        Update an entity.
         """
-        cls._repository._save(entity)
+        if cls._repository._exists(entity.id):  # type: ignore[attr-defined]
+            cls._repository._save(entity)
+        else:
+            raise NonExistingEntity(entity.id)  # type: ignore[attr-defined]
 
     @classmethod
     def _get_all(cls, version_number: Optional[str] = "all") -> List[EntityType]:

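The core behavioral change sits in this hunk: where _set() silently created missing entities, _update() only persists entities the repository already knows and raises NonExistingEntity otherwise. A minimal sketch of the new contract, reusing the MockEntity/MockManager pair from tests/core/_manager/test_manager.py shown further down (the "ghost" entity is made up for illustration):

    from taipy.core.exceptions.exceptions import NonExistingEntity

    m = MockEntity("uuid", "foo")
    MockManager._repository._save(m)  # creation now goes straight through the repository
    MockManager._update(m)            # the id exists, so the entity is saved again

    ghost = MockEntity("ghost", "bar")  # never saved
    try:
        MockManager._update(ghost)    # refused instead of being silently created
    except NonExistingEntity as e:
        print(e.message)              # "Entity ghost does not exist."
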
+ 1 - 1
taipy/core/_orchestrator/_dispatcher/_job_dispatcher.py

@@ -157,7 +157,7 @@ class _JobDispatcher(threading.Thread):
                 st = "".join(traceback.format_exception(type(e), value=e, tb=e.__traceback__))
                 job._stacktrace.append(st)
                 _TaipyLogger._get_logger().error(st)
-            _JobManagerFactory._build_manager()._set(job)
+            _JobManagerFactory._build_manager()._update(job)
         else:
             for output in job.task.output.values():
                 output.track_edit(job_id=job.id)

+ 5 - 5
taipy/core/_version/_cli/_version_cli.py

@@ -157,19 +157,19 @@ class _VersionCLI(_AbstractCLI):
         # Update the version of all entities
         for job in jobs:
             job._version = new_version
-            _JobManagerFactory._build_manager()._set(job)
+            _JobManagerFactory._build_manager()._update(job)
         for scenario in scenarios:
             scenario._version = new_version
-            _ScenarioManagerFactory._build_manager()._set(scenario)
+            _ScenarioManagerFactory._build_manager()._update(scenario)
         for sequence in sequences:
             sequence._version = new_version
-            _SequenceManagerFactory._build_manager()._set(sequence)
+            _SequenceManagerFactory._build_manager()._update(sequence)
         for task in tasks:
             task._version = new_version
-            _TaskManagerFactory._build_manager()._set(task)
+            _TaskManagerFactory._build_manager()._update(task)
         for datanode in datanodes:
             datanode._version = new_version
-            _DataManagerFactory._build_manager()._set(datanode)
+            _DataManagerFactory._build_manager()._update(datanode)
 
         # Rename the _Version entity
         _version_manager._rename_version(old_version, new_version)

+ 2 - 2
taipy/core/_version/_version_manager.py

@@ -68,7 +68,7 @@ class _VersionManager(_Manager[_Version]):
         else:
             version = _Version(id=id, config=Config._applied_config)  # type: ignore[attr-defined]
 
-        cls._set(version)
+        cls._repository._save(version)
         return version
 
     @classmethod
@@ -166,7 +166,7 @@ class _VersionManager(_Manager[_Version]):
 
         if not cls._get(new_version):
             version_entity.id = new_version
-            cls._set(version_entity)
+            cls._repository._save(version_entity)
 
     @classmethod
     def _manage_version(cls) -> None:

+ 1 - 1
taipy/core/cycle/_cycle_manager.py

@@ -39,7 +39,7 @@ class _CycleManager(_Manager[Cycle]):
         cycle = Cycle(
             frequency, properties, creation_date=creation_date, start_date=start_date, end_date=end_date, name=name
         )
-        cls._set(cycle)
+        cls._repository._save(cycle)
         _publish_event(
             cls._EVENT_ENTITY_TYPE,
             EventOperation.CREATION,

+ 3 - 3
taipy/core/data/_data_manager.py

@@ -66,7 +66,7 @@ class _DataManager(_Manager[DataNode], _VersionMixin):
             dn_configs_and_owner_id, cls._build_filters_with_version(None)
         )
         return {
-            dn_config: data_nodes.get((dn_config, owner_id)) or cls._create_and_set(dn_config, owner_id, None)
+            dn_config: data_nodes.get((dn_config, owner_id)) or cls._create(dn_config, owner_id, None)
             for dn_config, owner_id in dn_configs_and_owner_id
         }
 
@@ -84,11 +84,11 @@ class _DataManager(_Manager[DataNode], _VersionMixin):
         return reason_collection
 
     @classmethod
-    def _create_and_set(
+    def _create(
         cls, data_node_config: DataNodeConfig, owner_id: Optional[str], parent_ids: Optional[Set[str]]
     ) -> DataNode:
         data_node = cls.__create(data_node_config, owner_id, parent_ids)
-        cls._set(data_node)
+        cls._repository._save(data_node)
         Notifier.publish(_make_event(data_node, EventOperation.CREATION))
         return data_node
 

+ 8 - 6
taipy/core/data/_file_datanode_mixin.py

@@ -155,8 +155,7 @@ class _FileDataNodeMixin:
         try:
             upload_data = self._read_from_path(str(up_path))
         except Exception as err:
-            self.__logger.error(f"Error uploading `{up_path.name}` to data "
-                                f"node `{self.id}`:")  # type: ignore[attr-defined]
+            self.__logger.error(f"Error uploading `{up_path.name}` to data " f"node `{self.id}`:")  # type: ignore[attr-defined]
             self.__logger.error(f"Error: {err}")
             reasons._add_reason(self.id, UploadFileCanNotBeRead(up_path.name, self.id))  # type: ignore[attr-defined]
             return reasons
@@ -179,12 +178,15 @@ class _FileDataNodeMixin:
 
         shutil.copy(up_path, self.path)
 
-        self.track_edit(timestamp=datetime.now(),  # type: ignore[attr-defined]
-                        editor_id=editor_id,
-                        comment=comment, **kwargs)
+        self.track_edit(  # type: ignore[attr-defined]
+            timestamp=datetime.now(),
+            editor_id=editor_id,
+            comment=comment,
+            **kwargs,
+        )
         self.unlock_edit()  # type: ignore[attr-defined]
 
-        _DataManagerFactory._build_manager()._set(self)  # type: ignore[arg-type]
+        _DataManagerFactory._build_manager()._update(self)  # type: ignore[arg-type]
 
         return reasons
 

+ 2 - 2
taipy/core/data/data_node.py

@@ -444,7 +444,7 @@ class DataNode(_Entity, _Labeled):
         self._append(data)
         self.track_edit(editor_id=editor_id, comment=comment, **kwargs)
         self.unlock_edit()
-        _DataManagerFactory._build_manager()._set(self)
+        _DataManagerFactory._build_manager()._update(self)
 
     def write(
         self,
@@ -478,7 +478,7 @@ class DataNode(_Entity, _Labeled):
         self.unlock_edit()
         from ._data_manager_factory import _DataManagerFactory
 
-        _DataManagerFactory._build_manager()._set(self)
+        _DataManagerFactory._build_manager()._update(self)
 
     def track_edit(
         self,

+ 0 - 1
taipy/core/data/parquet.py

@@ -187,7 +187,6 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         # Ensure that the columns are strings, otherwise writing will fail with pandas 1.3.5
         df.columns = df.columns.astype(str)
         df.to_parquet(self._path, **kwargs)
-        self.track_edit(timestamp=datetime.now(), editor_id=editor_id)
 
     def read_with_kwargs(self, **read_kwargs):
         """Read data from this data node.

+ 24 - 1
taipy/core/exceptions/exceptions.py

@@ -28,8 +28,31 @@ class OrchestratorServiceIsAlreadyRunning(Exception):
     """Raised if the Orchestrator service is already running."""
 
 
+class EntityAlreadyExists(Exception):
+    """Raised if it is trying to create an Entity that has already existed."""
+
+
+class NonExistingEntity(Exception):
+    """Raised if a requested entity is not known by the Entity Manager."""
+
+    def __init__(self, entity_id: str):
+        self.message = f"Entity {entity_id} does not exist."
+
+
 class CycleAlreadyExists(Exception):
-    """Raised if it is trying to create a Cycle that has already exists."""
+    """Raised if it is trying to create a Cycle that has already existed."""
+
+
+class DataNodeAlreadyExists(Exception):
+    """Raised if it is trying to create a DataNode that has already existed."""
+
+
+class TaskAlreadyExists(Exception):
+    """Raised if it is trying to create a Task that has already existed."""
+
+
+class ScenarioAlreadyExists(Exception):
+    """Raised if it is trying to create a Scenario that has already existed."""
 
 
 class NonExistingCycle(Exception):

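One detail of the new NonExistingEntity: its __init__ stores the formatted text on self.message without passing it to Exception.__init__, so handlers that want the human-readable sentence should read the .message attribute. A small illustration (the entity id is made up):

    from taipy.core.exceptions.exceptions import NonExistingEntity

    exc = NonExistingEntity("DATANODE_sales_1234")  # hypothetical id
    print(exc.message)  # Entity DATANODE_sales_1234 does not exist.
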
+ 1 - 1
taipy/core/job/_job_manager.py

@@ -52,7 +52,7 @@ class _JobManager(_Manager[Job], _VersionMixin):
             version=version,
         )
         job._on_status_change(*callbacks)
-        cls._set(job)
+        cls._repository._save(job)
 
         Notifier.publish(_make_event(job, EventOperation.CREATION))
 

+ 27 - 22
taipy/core/scenario/_scenario_duplicator.py

@@ -8,6 +8,7 @@
 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
+
 from datetime import datetime
 from typing import Dict, Optional, Set, Union
 
@@ -29,7 +30,7 @@ from .scenario import Scenario
 class _ScenarioDuplicator:
     """A service to duplicate a scenario and related entities."""
 
-    def __init__(self, scenario: Scenario, data_to_duplicate: Union[bool, Set[str]]=True):
+    def __init__(self, scenario: Scenario, data_to_duplicate: Union[bool, Set[str]] = True):
         self.scenario: Scenario = scenario
         if data_to_duplicate is True:
             self.data_to_duplicate: Set[str] = set(self.scenario.data_nodes.keys())
@@ -44,12 +45,13 @@ class _ScenarioDuplicator:
         self.new_data_nodes: Dict[str, DataNode] = {}
 
         from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory
+
         self.__scenario_manager = _ScenarioManagerFactory._build_manager()
         self.__cycle_manager = _CycleManagerFactory._build_manager()
         self.__task_manager = _TaskManagerFactory._build_manager()
         self.__data_manager = _DataManagerFactory._build_manager()
 
-    def duplicate(self, new_creation_date: Optional[datetime]=None, new_name: Optional[str]=None) -> Scenario:
+    def duplicate(self, new_creation_date: Optional[datetime] = None, new_name: Optional[str] = None) -> Scenario:
         """Create a duplicated scenario with its related entities
 
         Create a scenario with the same configuration as the original scenario, but with
@@ -72,7 +74,7 @@ class _ScenarioDuplicator:
         for task in self.scenario.tasks.values():
             self.new_scenario._tasks.add(self._duplicate_task(task).id)  # type: ignore
         self._duplicate_sequences()
-        self.__scenario_manager._set(self.new_scenario)
+        self.__scenario_manager._repository._save(self.new_scenario)
         Notifier.publish(_make_event(self.new_scenario, EventOperation.CREATION))
         return self.new_scenario
 
@@ -81,23 +83,23 @@ class _ScenarioDuplicator:
             # Task and children data nodes already exist. No need to duplicate.
             self.new_tasks[task.config_id] = task
             task._parent_ids.update([self.new_scenario.id])
-            self.__task_manager._repository._save(task) # Through the repository so we don't set data nodes
+            self.__task_manager._repository._save(task)  # Through the repository so we don't set data nodes
             Notifier.publish(_make_event(task, EventOperation.UPDATE, "parent_ids", task._parent_ids))
             return task
         if task.scope == Scope.CYCLE and self.scenario.cycle.id == self.new_cycle_id:
             # Task and children data nodes already exist. No need to duplicate.
             self.new_tasks[task.config_id] = task
             task._parent_ids.update([self.new_scenario.id])
-            self.__task_manager._repository._save(task) # Through the repository so we don't set data nodes
+            self.__task_manager._repository._save(task)  # Through the repository so we don't set data nodes
             Notifier.publish(_make_event(task, EventOperation.UPDATE, "parent_ids", task._parent_ids))
             return task
         if task.scope == Scope.CYCLE:
             existing_tasks = self.__task_manager._repository._get_by_configs_and_owner_ids(  # type: ignore
-                [(task.config_id, self.new_cycle_id)],
-                self.__task_manager._build_filters_with_version(None))
+                [(task.config_id, self.new_cycle_id)], self.__task_manager._build_filters_with_version(None)
+            )
             if existing_tasks:
                 # Task and children data nodes already exist. No need to duplicate.
-                existing_t = existing_tasks[(task.config_id,self.new_cycle_id)]
+                existing_t = existing_tasks[(task.config_id, self.new_cycle_id)]
                 self.new_tasks[task.config_id] = existing_t
                 existing_t._parent_ids.update([self.new_scenario.id])
                 self.__task_manager._repository._save(existing_t)  # Don't set data nodes
@@ -111,39 +113,39 @@ class _ScenarioDuplicator:
             new_task._output[output.config_id] = self._duplicate_datanode(output, new_task)
         self.new_tasks[task.config_id] = new_task
 
-        self.__task_manager._set(new_task)
+        self.__task_manager._repository._save(new_task)
         Notifier.publish(_make_event(new_task, EventOperation.CREATION))
         return new_task
 
-    def _duplicate_datanode(self, dn: DataNode, task: Optional[Task]=None) -> DataNode:
+    def _duplicate_datanode(self, dn: DataNode, task: Optional[Task] = None) -> DataNode:
         if dn.config_id in self.new_data_nodes:
             # Data node already created from another task. No need to duplicate.
             new_dn = self.new_data_nodes[dn.config_id]
             new_dn._parent_ids.update([task.id]) if task else new_dn._parent_ids.update([self.new_scenario.id])
-            self.__data_manager._set(new_dn)
+            self.__data_manager._repository._save(new_dn)
             Notifier.publish(_make_event(new_dn, EventOperation.UPDATE, "parent_ids", new_dn._parent_ids))
             return new_dn
         if dn.scope == Scope.GLOBAL:
             # Data node already exists. No need to duplicate.
             dn._parent_ids.update([task.id]) if task else dn._parent_ids.update([self.new_scenario.id])
-            self.__data_manager._set(dn)
+            self.__data_manager._update(dn)
             Notifier.publish(_make_event(dn, EventOperation.UPDATE, "parent_ids", dn._parent_ids))
             return dn
         if dn.scope == Scope.CYCLE and self.scenario.cycle.id == self.new_cycle_id:
             # Data node already exists. No need to duplicate.
             dn._parent_ids.update([task.id]) if task else dn._parent_ids.update([self.new_scenario.id])
-            self.__data_manager._set(dn)
+            self.__data_manager._update(dn)
             Notifier.publish(_make_event(dn, EventOperation.UPDATE, "parent_ids", dn._parent_ids))
             return dn
         if dn.scope == Scope.CYCLE:
             existing_dns = self.__data_manager._repository._get_by_configs_and_owner_ids(  # type: ignore
-                [(dn.config_id, self.new_cycle_id)],
-                self.__data_manager._build_filters_with_version(None))
+                [(dn.config_id, self.new_cycle_id)], self.__data_manager._build_filters_with_version(None)
+            )
             if existing_dns.get((dn.config_id, self.new_cycle_id)):
                 ex_dn = existing_dns[(dn.config_id, self.new_cycle_id)]
                 # A cycle data node with same config and same cycle owner already exist. No need to duplicate it.
                 ex_dn._parent_ids.update([task.id]) if task else ex_dn._parent_ids.update([self.new_scenario.id])
-                self.__data_manager._set(ex_dn)
+                self.__data_manager._update(ex_dn)
                 Notifier.publish(_make_event(ex_dn, EventOperation.UPDATE, "parent_ids", ex_dn._parent_ids))
                 return ex_dn
 
@@ -154,7 +156,7 @@ class _ScenarioDuplicator:
                 duplicator.duplicate_data(new_dn)
 
         self.new_data_nodes[dn.config_id] = new_dn
-        self.__data_manager._set(new_dn)
+        self.__data_manager._repository._save(new_dn)
         Notifier.publish(_make_event(new_dn, EventOperation.CREATION))
         return new_dn
 
@@ -162,12 +164,14 @@ class _ScenarioDuplicator:
         new_sequences = {}
         for seq_name, seq_data in self.scenario._sequences.items():
             new_sequence_id = Sequence._new_id(seq_name, self.new_scenario.id)
-            new_sequence = {Scenario._SEQUENCE_PROPERTIES_KEY: seq_data[Scenario._SEQUENCE_PROPERTIES_KEY],
-                            Scenario._SEQUENCE_TASKS_KEY: []}  # We do not want to duplicate the subscribers
+            new_sequence = {
+                Scenario._SEQUENCE_PROPERTIES_KEY: seq_data[Scenario._SEQUENCE_PROPERTIES_KEY],
+                Scenario._SEQUENCE_TASKS_KEY: [],
+            }  # We do not want to duplicate the subscribers
             for task in seq_data[Scenario._SEQUENCE_TASKS_KEY]:
                 new_task = self.new_tasks[task.config_id]
                 new_task._parent_ids.update([new_sequence_id])
-                self.__task_manager._set(new_task)
+                self.__task_manager._repository._save(new_task)
                 new_sequence[Scenario._SEQUENCE_TASKS_KEY].append(self.new_tasks[task.config_id])
             new_sequences[seq_name] = new_sequence
         self.new_scenario._sequences = new_sequences
@@ -175,8 +179,9 @@ class _ScenarioDuplicator:
     def __init_new_scenario(self, new_creation_date: datetime, new_name: Optional[str]) -> Scenario:
         self.new_scenario = self.__scenario_manager._get(self.scenario)
         self.new_scenario.id = self.new_scenario._new_id(self.scenario.config_id)
+        self.__scenario_manager._repository._save(self.new_scenario)
         self.new_scenario._creation_date = new_creation_date
-        if frequency:= Config.scenarios[self.scenario.config_id].frequency:
+        if frequency := Config.scenarios[self.scenario.config_id].frequency:
             cycle = self.__cycle_manager._get_or_create(frequency, new_creation_date)
             self.new_scenario._cycle = cycle
             self.new_scenario._primary_scenario = len(self.__scenario_manager._get_all_by_cycle(cycle)) == 0
@@ -205,7 +210,7 @@ class _ScenarioDuplicator:
         new_task._output = {}  # To be potentially updated later
         return new_task
 
-    def __init_new_datanode(self, dn: DataNode, task: Optional[Task]=None) -> DataNode:
+    def __init_new_datanode(self, dn: DataNode, task: Optional[Task] = None) -> DataNode:
         new_dn = self.__data_manager._get(dn)
         new_dn.id = DataNode._new_id(dn._config_id)
         new_dn._owner_id = self.new_scenario.id if dn.scope == Scope.SCENARIO else self.new_cycle_id

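For orientation, a hedged usage sketch of the duplicator above: per __init__, data_to_duplicate=True duplicates the data of every data node, while passing a set restricts duplication to the listed config ids. The scenario object and the "cleaned_data" config id below are assumptions for illustration:

    from datetime import datetime

    from taipy.core.scenario._scenario_duplicator import _ScenarioDuplicator

    # `scenario` is assumed to be an existing, saved Scenario entity.
    duplicator = _ScenarioDuplicator(scenario, data_to_duplicate={"cleaned_data"})
    copy = duplicator.duplicate(new_creation_date=datetime.now(), new_name="retry")
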
+ 6 - 6
taipy/core/scenario/_scenario_manager.py

@@ -189,14 +189,14 @@ class _ScenarioManager(_Manager[Scenario], _VersionMixin):
         for task in tasks:
             if scenario_id not in task._parent_ids:
                 task._parent_ids.update([scenario_id])
-                _task_manager._set(task)
+                _task_manager._update(task)
 
         for dn in additional_data_nodes.values():
             if scenario_id not in dn._parent_ids:
                 dn._parent_ids.update([scenario_id])
-                _data_manager._set(dn)
+                _data_manager._update(dn)
 
-        cls._set(scenario)
+        cls._repository._save(scenario)
 
         if not scenario._is_consistent():
             raise InvalidScenario(scenario.id)
@@ -381,7 +381,7 @@ class _ScenarioManager(_Manager[Scenario], _VersionMixin):
         if len(tags) > 0 and tag not in tags:
             raise UnauthorizedTagError(f"Tag `{tag}` not authorized by scenario configuration `{scenario.config_id}`")
         scenario._add_tag(tag)
-        cls._set(scenario)
+        cls._update(scenario)
         Notifier.publish(
             _make_event(scenario, EventOperation.UPDATE, attribute_name="tags", attribute_value=scenario.tags)
         )
@@ -389,7 +389,7 @@ class _ScenarioManager(_Manager[Scenario], _VersionMixin):
     @classmethod
     def _untag(cls, scenario: Scenario, tag: str) -> None:
         scenario._remove_tag(tag)
-        cls._set(scenario)
+        cls._update(scenario)
         Notifier.publish(
             _make_event(scenario, EventOperation.UPDATE, attribute_name="tags", attribute_value=scenario.tags)
         )
@@ -529,7 +529,7 @@ class _ScenarioManager(_Manager[Scenario], _VersionMixin):
         scenario: Scenario,
         new_creation_date: Optional[datetime] = None,
         new_name: Optional[str] = None,
-        data_to_duplicate: Union[bool, Set[str]] = True
+        data_to_duplicate: Union[bool, Set[str]] = True,
     ) -> Scenario:
         """Create a duplicated scenario with its related entities.
 

+ 5 - 5
taipy/core/sequence/_sequence_manager.py

@@ -92,7 +92,7 @@ class _SequenceManager(_Manager[Sequence], _VersionMixin):
                 scenario = scenario_manager._get(scenario_id)
                 for sequence_name in sequence_names:
                     del scenario._sequences[sequence_name]
-                scenario_manager._set(scenario)
+                scenario_manager._update(scenario)
 
             if hasattr(cls, "_EVENT_ENTITY_TYPE"):
                 for sequence_id in sequence_ids:
@@ -117,9 +117,9 @@ class _SequenceManager(_Manager[Sequence], _VersionMixin):
         cls._delete_entities_of_multiple_types(entity_ids_to_delete)
 
     @classmethod
-    def _set(cls, sequence: Sequence) -> None:
+    def _update(cls, sequence: Sequence) -> None:
         """
-        Save or update a Sequence.
+        Update a Sequence.
         """
         sequence_name, scenario_id = cls._breakdown_sequence_id(sequence.id)
         scenario_manager = _ScenarioManagerFactory._build_manager()
@@ -131,7 +131,7 @@ class _SequenceManager(_Manager[Sequence], _VersionMixin):
                 Scenario._SEQUENCE_PROPERTIES_KEY: sequence._properties.data,
             }
             scenario._sequences[sequence_name] = sequence_data
-            scenario_manager._set(scenario)
+            scenario_manager._update(scenario)
         else:
             cls._logger.error(f"Sequence {sequence.id} belongs to a non-existing Scenario {scenario_id}.")
             raise SequenceBelongsToNonExistingScenario(sequence.id, scenario_id)
@@ -214,7 +214,7 @@ class _SequenceManager(_Manager[Sequence], _VersionMixin):
         for task in _tasks:
             if sequence_id not in task._parent_ids:
                 task._parent_ids.update([sequence_id])
-                task_manager._set(task)
+                task_manager._update(task)
 
         if not sequence._is_consistent():
             raise InvalidSequence(sequence_id)

+ 3 - 3
taipy/core/submission/_submission_manager.py

@@ -48,7 +48,7 @@ class _SubmissionManager(_Manager[Submission], _VersionMixin):
         submission = Submission(
             entity_id=entity_id, entity_type=entity_type, entity_config_id=entity_config, properties=properties
         )
-        cls._set(submission)
+        cls._repository._save(submission)
 
         Notifier.publish(_make_event(submission, EventOperation.CREATION))
 
@@ -90,7 +90,7 @@ class _SubmissionManager(_Manager[Submission], _VersionMixin):
                 submission._running_jobs.discard(job.id)
                 submission._blocked_jobs.discard(job.id)
                 submission._pending_jobs.discard(job.id)
-            cls._set(submission)
+            cls._update(submission)
 
             # The submission_status is set later to make sure notification for updating
             # the submission_status attribute is triggered
@@ -119,7 +119,7 @@ class _SubmissionManager(_Manager[Submission], _VersionMixin):
         _current_submission_status = submission._submission_status
         submission._submission_status = new_submission_status
 
-        cls._set(submission)
+        cls._update(submission)
 
         if _current_submission_status != submission._submission_status:
             event = _make_event(

+ 18 - 12
taipy/core/taipy.py

@@ -25,7 +25,7 @@ from .common._check_instance import (
     _is_submission,
     _is_task,
 )
-from .common._warnings import _warn_no_orchestrator_service
+from .common._warnings import _warn_deprecated, _warn_no_orchestrator_service
 from .common.scope import Scope
 from .config.data_node_config import DataNodeConfig
 from .config.scenario_config import ScenarioConfig
@@ -57,26 +57,32 @@ __logger = _TaipyLogger._get_logger()
 
 
 def set(entity: Union[DataNode, Task, Sequence, Scenario, Cycle, Submission]):
-    """Save or update an entity.
+    """Deprecated in favor of `update()` since 4.1.0."""
+    _warn_deprecated("set()", suggest="update()")
+    return update(entity)
 
-    This function allows you to save or update an entity in Taipy.
+
+def update(entity: Union[DataNode, Task, Sequence, Scenario, Cycle, Submission]):
+    """Update an entity.
+
+    This function allows you to update an entity in Taipy.
 
     Arguments:
         entity (Union[DataNode^, Task^, Sequence^, Scenario^, Cycle^, Submission^]): The
-            entity to save or update.
+            entity to update.
     """
     if isinstance(entity, Cycle):
-        return _CycleManagerFactory._build_manager()._set(entity)
+        return _CycleManagerFactory._build_manager()._update(entity)
     if isinstance(entity, Scenario):
-        return _ScenarioManagerFactory._build_manager()._set(entity)
+        return _ScenarioManagerFactory._build_manager()._update(entity)
     if isinstance(entity, Sequence):
-        return _SequenceManagerFactory._build_manager()._set(entity)
+        return _SequenceManagerFactory._build_manager()._update(entity)
     if isinstance(entity, Task):
-        return _TaskManagerFactory._build_manager()._set(entity)
+        return _TaskManagerFactory._build_manager()._update(entity)
     if isinstance(entity, DataNode):
-        return _DataManagerFactory._build_manager()._set(entity)
+        return _DataManagerFactory._build_manager()._update(entity)
     if isinstance(entity, Submission):
-        return _SubmissionManagerFactory._build_manager()._set(entity)
+        return _SubmissionManagerFactory._build_manager()._update(entity)
 
 
 def is_submittable(entity: Union[Scenario, ScenarioId, Sequence, SequenceId, Task, TaskId, str]) -> ReasonCollection:
@@ -933,7 +939,7 @@ def create_global_data_node(config: DataNodeConfig) -> DataNode:
 
     if dns := _DataManagerFactory._build_manager()._get_by_config_id(config.id):
         return dns[0]
-    return _DataManagerFactory._build_manager()._create_and_set(config, None, None)
+    return _DataManagerFactory._build_manager()._create(config, None, None)
 
 
 def clean_all_entities(version_number: str) -> bool:
@@ -1085,7 +1091,7 @@ def duplicate_scenario(
     scenario: Scenario,
     new_creation_date: Optional[datetime] = None,
     new_name: Optional[str] = None,
-    data_to_duplicate: Union[Set[str], bool] = True
+    data_to_duplicate: Union[Set[str], bool] = True,
 ) -> Scenario:
     """Duplicate an existing scenario and return a new scenario.
 

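At the public API level the migration is mechanical: set() still works but emits a deprecation warning and delegates to update(). A sketch, assuming scenario_cfg is an existing ScenarioConfig:

    import taipy as tp

    scenario = tp.create_scenario(scenario_cfg)  # scenario_cfg: an assumed ScenarioConfig

    tp.update(scenario)  # new API: persists changes to an already-created entity
    tp.set(scenario)     # deprecated: warns, then calls update() itself
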
+ 12 - 11
taipy/core/task/_task_manager.py

@@ -9,6 +9,7 @@
 # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
 
+import itertools
 from typing import Callable, List, Optional, Type, Union, cast
 
 from taipy.common.config import Config
@@ -52,10 +53,16 @@ class _TaskManager(_Manager[Task], _VersionMixin):
         return _OrchestratorFactory._build_orchestrator()
 
     @classmethod
-    def _set(cls, task: Task) -> None:
-        cls.__save_data_nodes(task.input.values())
-        cls.__save_data_nodes(task.output.values())
-        super()._set(task)
+    def _create(cls, task: Task) -> None:
+        for dn in itertools.chain(task.input.values(), task.output.values()):
+            _DataManagerFactory._build_manager()._repository._save(dn)
+        cls._repository._save(task)
+
+    @classmethod
+    def _update(cls, task: Task) -> None:
+        for dn in itertools.chain(task.input.values(), task.output.values()):
+            _DataManagerFactory._build_manager()._update(dn)
+        super()._update(task)
 
     @classmethod
     def _get_owner_id(
@@ -127,7 +134,7 @@ class _TaskManager(_Manager[Task], _VersionMixin):
                 )
                 for dn in set(inputs + outputs):
                     dn._parent_ids.update([task.id])
-                cls._set(task)
+                cls._create(task)
                 Notifier.publish(_make_event(task, EventOperation.CREATION))
                 tasks.append(task)
         return tasks
@@ -140,12 +147,6 @@ class _TaskManager(_Manager[Task], _VersionMixin):
         filters = cls._build_filters_with_version(version_number)
         return cls._repository._load_all(filters)
 
-    @classmethod
-    def __save_data_nodes(cls, data_nodes) -> None:
-        data_manager = _DataManagerFactory._build_manager()
-        for i in data_nodes:
-            data_manager._set(i)
-
     @classmethod
     def _hard_delete(cls, task_id: TaskId) -> None:
         task = cls._get(task_id)

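The task manager now splits the old _set() in two: _create(), used by _bulk_get_or_create, saves the task and its data nodes directly through the repositories, while _update() cascades _update to every input and output data node before updating the task itself. A sketch, assuming `task` is a freshly built Task already wired to its data nodes:

    from taipy.core.task._task_manager_factory import _TaskManagerFactory

    manager = _TaskManagerFactory._build_manager()

    manager._create(task)   # first persistence: task + data nodes via _repository._save
    task.skippable = True
    manager._update(task)   # later saves: the task and its data nodes must already exist
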
+ 1 - 1
taipy/rest/api/resources/cycle.py

@@ -401,7 +401,7 @@ class CycleList(Resource):
         manager = _CycleManagerFactory._build_manager()
 
         cycle = self.__create_cycle_from_schema(schema.load(request.json))
-        manager._set(cycle)
+        manager._repository._save(cycle)
 
         return {
             "message": "Cycle was created.",

+ 1 - 1
taipy/rest/api/resources/job.py

@@ -220,7 +220,7 @@ class JobList(Resource):
         manager = _JobManagerFactory._build_manager()
         schema = JobSchema()
         job = self.__create_job_from_schema(task_config_id)
-        manager._set(job)
+        manager._repository._save(job)
         return {
             "message": "Job was created.",
             "job": schema.dump(job),

+ 9 - 9
tests/core/_manager/test_manager.py

@@ -110,19 +110,19 @@ class MockManager(_Manager[MockEntity]):
 class TestManager:
     def test_save_and_fetch_model(self):
         m = MockEntity("uuid", "foo")
-        MockManager._set(m)
+        MockManager._repository._save(m)
 
         fetched_model = MockManager._get(m.id)
         assert m == fetched_model
 
     def test_exists(self):
         m = MockEntity("uuid", "foo")
-        MockManager._set(m)
+        MockManager._repository._save(m)
         assert MockManager._exists(m.id)
 
     def test_get(self):
         m = MockEntity("uuid", "foo")
-        MockManager._set(m)
+        MockManager._repository._save(m)
         assert MockManager._get(m.id) == m
 
     def test_get_all(self):
@@ -132,14 +132,14 @@ class TestManager:
         for i in range(5):
             m = MockEntity(f"uuid-{i}", f"Foo{i}")
             objs.append(m)
-            MockManager._set(m)
+            MockManager._repository._save(m)
         _objs = MockManager._get_all()
 
         assert len(_objs) == 5
 
     def test_delete(self):
         m = MockEntity("uuid", "foo")
-        MockManager._set(m)
+        MockManager._repository._save(m)
         MockManager._delete(m.id)
         assert MockManager._get(m.id) is None
 
@@ -148,7 +148,7 @@ class TestManager:
         for i in range(5):
             m = MockEntity(f"uuid-{i}", f"Foo{i}")
             objs.append(m)
-            MockManager._set(m)
+            MockManager._repository._save(m)
         MockManager._delete_all()
         assert MockManager._get_all() == []
 
@@ -157,13 +157,13 @@ class TestManager:
         for i in range(5):
             m = MockEntity(f"uuid-{i}", f"Foo{i}")
             objs.append(m)
-            MockManager._set(m)
+            MockManager._repository._save(m)
         MockManager._delete_many(["uuid-0", "uuid-1"])
         assert len(MockManager._get_all()) == 3
 
     def test_is_editable(self):
         m = MockEntity("uuid", "Foo")
-        MockManager._set(m)
+        MockManager._repository._save(m)
         assert MockManager._is_editable(m)
 
         rc = MockManager._is_editable("some_entity")
@@ -172,7 +172,7 @@ class TestManager:
 
     def test_is_readable(self):
         m = MockEntity("uuid", "Foo")
-        MockManager._set(m)
+        MockManager._repository._save(m)
         assert MockManager._is_readable(m)
 
         rc = MockManager._is_editable("some_entity")

+ 4 - 1
tests/core/_orchestrator/_dispatcher/test_development_job_dispatcher.py

@@ -15,6 +15,7 @@ from unittest.mock import patch
 from taipy.core import JobId
 from taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher
 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
 from taipy.core.job.job import Job
 from taipy.core.task._task_manager_factory import _TaskManagerFactory
 from taipy.core.task.task import Task
@@ -26,13 +27,14 @@ def nothing(*args):
 
 def create_task():
     task = Task("config_id", {}, nothing, [], [])
-    _TaskManagerFactory._build_manager()._set(task)
+    _TaskManagerFactory._build_manager()._repository._save(task)
     return task
 
 
 def test_dispatch_executes_the_function_no_exception():
     task = create_task()
     job = Job(JobId("job"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._repository._save(job)
     dispatcher = _OrchestratorFactory._build_dispatcher()
 
     with patch("taipy.core._orchestrator._dispatcher._task_function_wrapper._TaskFunctionWrapper.execute") as mck:
@@ -48,6 +50,7 @@ def test_dispatch_executes_the_function_no_exception():
 def test_dispatch_executes_the_function_with_exceptions():
     task = create_task()
     job = Job(JobId("job"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._repository._save(job)
     dispatcher = _OrchestratorFactory._build_dispatcher()
     e_1 = Exception("test")
     e_2 = Exception("test")

+ 6 - 6
tests/core/_orchestrator/_dispatcher/test_dispatcher__execute_job.py

@@ -38,7 +38,7 @@ def test_execute_job():
     scenario.t1.skippable = True  # make the job skippable
     scenario.dn.lock_edit()  # lock output edit
     job = Job(JobId("id"), scenario.t1, "submit_id", TaskId("id"))
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
     with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._dispatch") as mck_1:
         with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._needs_to_run") as mck_2:
             mck_2.return_value = True
@@ -56,7 +56,7 @@ def test_execute_job_to_skip():
     scenario.t1.skippable = True  # make the job skippable
     scenario.dn.lock_edit()  # lock output edit
     job = Job(JobId("id"), scenario.t1, "submit_id", TaskId("id"))
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
 
     with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._dispatch") as mck_1:
         with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._needs_to_run") as mck_2:
@@ -74,7 +74,7 @@ def test_execute_job_skippable_with_force():
     scenario.t1.skippable = True  # make the job skippable
     scenario.dn.lock_edit()  # lock output edit
     job = Job(JobId("id"), scenario.t1, "submit_id", TaskId("id"), force=True)
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
 
     with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._dispatch") as mck_1:
         with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._needs_to_run") as mck_2:
@@ -90,11 +90,11 @@ def test_execute_job_skippable_with_force():
 
 def test_execute_jobs_synchronously():
     task = Task("config_id", {}, nothing, [], [])
-    _TaskManagerFactory._build_manager()._set(task)
+    _TaskManagerFactory._build_manager()._repository._save(task)
     job_1 = Job(JobId("job1"), task, "s_id", task.id)
     job_2 = Job(JobId("job2"), task, "s_id", task.id)
-    _JobManagerFactory._build_manager()._set(job_1)
-    _JobManagerFactory._build_manager()._set(job_2)
+    _JobManagerFactory._build_manager()._repository._save(job_1)
+    _JobManagerFactory._build_manager()._repository._save(job_2)
     orchestrator = _OrchestratorFactory._build_orchestrator()
     orchestrator.jobs_to_run.put(job_1)
     orchestrator.jobs_to_run.put(job_2)

+ 8 - 7
tests/core/_orchestrator/_dispatcher/test_dispatcher__update_job_status.py

@@ -26,12 +26,13 @@ def nothing(*args):
 def _error():
     raise RuntimeError("Something bad has happened")
 
+
 def test_update_job_status_no_exception():
     output = InMemoryDataNode("data_node", scope=Scope.SCENARIO)
-    task = Task("config_id",  {}, nothing, output=[output])
-    _TaskManagerFactory._build_manager()._set(task)
+    task = Task("config_id", {}, nothing, output=[output])
+    _TaskManagerFactory._build_manager()._create(task)
     job = Job(JobId("id"), task, "s_id", task.id)
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
 
     _JobDispatcher(_OrchestratorFactory._orchestrator)._update_job_status(job, None)
 
@@ -49,9 +50,9 @@ def test_update_job_status_no_exception():
 
 def test_update_job_status_with_one_exception():
     task = Task("config_id", {}, nothing)
-    _TaskManagerFactory._build_manager()._set(task)
+    _TaskManagerFactory._build_manager()._repository._save(task)
     job = Job(JobId("id"), task, "s_id", task.id)
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
     e = Exception("test")
     _JobDispatcher(_OrchestratorFactory._orchestrator)._update_job_status(job, [e])
 
@@ -62,9 +63,9 @@ def test_update_job_status_with_one_exception():
 
 def test_update_job_status_with_exceptions():
     task = Task("config_id", {}, nothing)
-    _TaskManagerFactory._build_manager()._set(task)
+    _TaskManagerFactory._build_manager()._repository._save(task)
     job = Job(JobId("id"), task, "s_id", task.id)
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
     e_1 = Exception("test1")
     e_2 = Exception("test2")
     _JobDispatcher(_OrchestratorFactory._orchestrator)._update_job_status(job, [e_1, e_2])

+ 6 - 5
tests/core/_orchestrator/_dispatcher/test_standalone_job_dispatcher.py

@@ -32,7 +32,7 @@ def nothing(*args):
 
 def create_task():
     task = Task("config_id", {}, nothing, [], [])
-    _TaskManagerFactory._build_manager()._set(task)
+    _TaskManagerFactory._build_manager()._repository._save(task)
     return task
 
 
@@ -92,6 +92,7 @@ def test_can_execute():
 def test_update_job_status_from_future():
     task = create_task()
     job = Job(JobId("job"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._repository._save(job)
     orchestrator = _OrchestratorFactory._build_orchestrator()
     dispatcher = _StandaloneJobDispatcher(orchestrator)
     ft = Future()
@@ -108,10 +109,10 @@ def test_run():
     job_2 = Job(JobId("job2"), task, "s_id", task.id)
     job_3 = Job(JobId("job3"), task, "s_id", task.id)
     job_4 = Job(JobId("job4"), task, "s_id", task.id)
-    _JobManagerFactory._build_manager()._set(job_1)
-    _JobManagerFactory._build_manager()._set(job_2)
-    _JobManagerFactory._build_manager()._set(job_3)
-    _JobManagerFactory._build_manager()._set(job_4)
+    _JobManagerFactory._build_manager()._repository._save(job_1)
+    _JobManagerFactory._build_manager()._repository._save(job_2)
+    _JobManagerFactory._build_manager()._repository._save(job_3)
+    _JobManagerFactory._build_manager()._repository._save(job_4)
     orchestrator = _OrchestratorFactory._build_orchestrator()
     orchestrator.jobs_to_run.put(job_1)
     orchestrator.jobs_to_run.put(job_2)

+ 1 - 1
tests/core/_orchestrator/test_orchestrator__cancel_jobs.py

@@ -27,7 +27,7 @@ def create_job(status):
     t_cfg = Config.configure_task("no_output", nothing, [], [])
     t = _TaskManagerFactory._build_manager()._bulk_get_or_create([t_cfg])
     job = Job(JobId("foo"), t[0], "", "")
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
     job.status = status
     return job
 

+ 2 - 2
tests/core/_orchestrator/test_orchestrator__on_status_change.py

@@ -26,14 +26,14 @@ def create_job(id, status):
     t_cfg = Config.configure_task("no_output", nothing, [], [])
     t = _TaskManagerFactory._build_manager()._bulk_get_or_create([t_cfg])
     job = Job(JobId(id), t[0], "", "")
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
     job.status = status
     return job
 
 
 def create_job_from_task(id, task):
     job = Job(JobId(id), task, "s", task.id)
-    _JobManagerFactory._build_manager()._set(job)
+    _JobManagerFactory._build_manager()._repository._save(job)
     return job
 
 

+ 11 - 11
tests/core/_orchestrator/test_orchestrator__submit.py

@@ -492,13 +492,13 @@ def test_submit_submittable_generate_unique_submit_id():
     task_1 = Task("task_config_id_1", {}, print, [dn_1])
     task_2 = Task("task_config_id_2", {}, print, [dn_1], [dn_2])
 
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
+    _DataManager._repository._save(dn_1)
+    _DataManager._repository._save(dn_2)
+    _TaskManager._repository._save(task_1)
+    _TaskManager._repository._save(task_2)
 
     scenario = Scenario("scenario", {task_1, task_2}, {})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     jobs_1 = taipy.submit(scenario).jobs
     jobs_2 = taipy.submit(scenario).jobs
@@ -525,11 +525,11 @@ def test_submit_duration_development_mode():
     task_1 = Task("task_config_id_1", {}, task_sleep_1, [], [])
     task_2 = Task("task_config_id_2", {}, task_sleep_2, [], [])
 
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
+    _TaskManager._repository._save(task_1)
+    _TaskManager._repository._save(task_2)
 
     scenario = Scenario("scenario", {task_1, task_2}, {})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
     submission = taipy.submit(scenario)
     jobs = submission.jobs
     orchestrator.stop()
@@ -557,11 +557,11 @@ def test_submit_duration_standalone_mode():
     task_1 = Task("task_config_id_1", {}, task_sleep_1, [], [])
     task_2 = Task("task_config_id_2", {}, task_sleep_2, [], [])
 
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
+    _TaskManager._repository._save(task_1)
+    _TaskManager._repository._save(task_2)
 
     scenario = Scenario("scenario", {task_1, task_2}, {})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
     submission = taipy.submit(scenario)
 
     assert_true_after_time(lambda: all(job is not None and job.is_completed() for job in submission.jobs))

+ 6 - 4
tests/core/config/test_scenario_config.py

@@ -129,10 +129,10 @@ def test_datanode_config_ranks():
 
     # s1 additional: dn3
     # s1 dag:  dn1 -> dn2
-    Config.configure_scenario("s1", [task_config_1],[dn_config_3])
+    Config.configure_scenario("s1", [task_config_1], [dn_config_3])
     # s2 additional: dn4
     # s2 dag:  dn2 -> dn3
-    Config.configure_scenario("s2", [task_config_2],[dn_config_4])
+    Config.configure_scenario("s2", [task_config_2], [dn_config_4])
     # s3 additional: None
     # s3 dag:  dn1 -> dn2 -> dn3
     Config.configure_scenario("s3", [task_config_1, task_config_2])
@@ -228,7 +228,7 @@ def test_scenario_creation_no_duplication():
     assert len(Config.scenarios) == 2
 
 
-def test_scenario_get_set_and_remove_comparators():
+def test_scenario_add_get_and_remove_comparators():
     task_config_1 = Config.configure_task("task1", my_func)
     task_config_2 = Config.configure_task("task2", print)
     dn_config_1 = "dn_config_1"
@@ -361,6 +361,7 @@ def test_add_sequence():
     scenario_config.remove_sequences(["sequence2", "sequence3"])
     assert len(scenario_config.sequences) == 0
 
+
 @pytest.mark.skip(reason="Generates a png that must be visually verified.")
 def test_draw_1():
     dn_config_1 = Config.configure_data_node("dn1")
@@ -378,6 +379,7 @@ def test_draw_1():
     )
     scenario_cfg.draw()
 
+
 @pytest.mark.skip(reason="Generates a png that must be visually verified.")
 def test_draw_2():
     data_node_1 = Config.configure_data_node("s1")
@@ -403,6 +405,7 @@ def test_draw_2():
     #         0        1         2          3          4
     scenario_cfg.draw("draw_2")
 
+
 @pytest.mark.skip(reason="Generates a png that must be visually verified.")
 def test_draw_3():
     data_node_1 = Config.configure_data_node("s1")
@@ -420,7 +423,6 @@ def test_draw_3():
     task_5 = Config.configure_task("t5", print, [data_node_7], None)
     scenario_cfg = Config.configure_scenario("scenario1", [task_5, task_3, task_4, task_2, task_1])
 
-
     #  12 |  s7 __
     #  11 |       \
     #  10 |        \

+ 7 - 3
tests/core/cycle/test_cycle.py

@@ -24,7 +24,7 @@ def test_cycle_equals(cycle):
     cycle_manager = _CycleManagerFactory()._build_manager()
 
     cycle_id = cycle.id
-    cycle_manager._set(cycle)
+    cycle_manager._repository._save(cycle)
 
     # To test if instance is same type
     task = Task("task", {}, print, [], [], cycle_id)
@@ -94,6 +94,8 @@ def test_cycle_label(current_datetime):
         start_date=current_datetime,
         end_date=current_datetime,
     )
+    _CycleManagerFactory()._build_manager()._repository._save(cycle)
+
     assert cycle.get_label() == cycle.name
     assert cycle.get_simple_label() == cycle.name
 
@@ -111,6 +113,8 @@ def test_add_property_to_scenario(current_datetime):
         current_datetime,
         name="foo",
     )
+    _CycleManagerFactory()._build_manager()._repository._save(cycle)
+
     assert cycle.properties == {"key": "value"}
     assert cycle.properties["key"] == "value"
 
@@ -121,7 +125,7 @@ def test_add_property_to_scenario(current_datetime):
     assert cycle.properties["new_key"] == "new_value"
 
 
-def test_auto_set_and_reload(current_datetime):
+def test_auto_update_and_reload(current_datetime):
     cycle_1 = Cycle(
         Frequency.WEEKLY,
         {"key": "value"},
@@ -131,7 +135,7 @@ def test_auto_set_and_reload(current_datetime):
         name="foo",
     )
 
-    _CycleManager._set(cycle_1)
+    _CycleManager._repository._save(cycle_1)
     cycle_2 = _CycleManager._get(cycle_1)
 
     # auto set & reload on frequency attribute

+ 4 - 4
tests/core/cycle/test_cycle_manager.py

@@ -29,7 +29,7 @@ def test_save_and_get_cycle_entity(tmpdir, cycle, current_datetime):
 
     assert len(_CycleManager._get_all()) == 0
 
-    _CycleManager._set(cycle)
+    _CycleManager._repository._save(cycle)
     assert _CycleManager._exists(cycle.id)
 
     cycle_1 = _CycleManager._get(cycle.id)
@@ -62,7 +62,7 @@ def test_save_and_get_cycle_entity(tmpdir, cycle, current_datetime):
         name="bar",
         id=cycle_1.id,
     )
-    _CycleManager._set(cycle_3)
+    _CycleManager._repository._save(cycle_3)
 
     cycle_3 = _CycleManager._get(cycle_1.id)
 
@@ -242,10 +242,10 @@ def test_get_primary(tmpdir, cycle, current_datetime):
 
     assert len(_CycleManager._get_all()) == 0
 
-    _CycleManager._set(cycle)
+    _CycleManager._repository._save(cycle)
     cycle_1 = _CycleManager._get(cycle.id)
     cycle_2 = Cycle(Frequency.MONTHLY, {}, current_datetime, current_datetime, current_datetime, name="foo")
-    _CycleManager._set(cycle_2)
+    _CycleManager._repository._save(cycle_2)
     cycle_2 = _CycleManager._get(cycle_2.id)
     cycles = _CycleManager._get_all()
     assert len(_CycleManager._get_all()) == 2

+ 1 - 1
tests/core/cycle/test_cycle_repositories.py

@@ -101,8 +101,8 @@ class TestCycleRepositories:
 
         for i in range(10):
             cycle.id = CycleId(f"cycle-{i}")
-            cycle.name = f"cycle-{i}"
             repository._save(cycle)
+            cycle.name = f"cycle-{i}"
 
         assert len(repository._load_all()) == 10
 

+ 1 - 1
tests/core/data/test_aws_s3_data_node.py

@@ -41,7 +41,7 @@ class TestS3ObjectDataNode:
     @pytest.mark.parametrize("properties", __properties)
     def test_create(self, properties):
         s3_object_dn_config = Config.configure_s3_object_data_node(id="foo_bar_aws_s3", **properties)
-        aws_s3_object_dn = _DataManagerFactory._build_manager()._create_and_set(s3_object_dn_config, None, None)
+        aws_s3_object_dn = _DataManagerFactory._build_manager()._create(s3_object_dn_config, None, None)
         assert isinstance(aws_s3_object_dn, S3ObjectDataNode)
         assert aws_s3_object_dn.storage_type() == "s3_object"
         assert aws_s3_object_dn.config_id == "foo_bar_aws_s3"

+ 9 - 2
tests/core/data/test_csv_data_node.py

@@ -56,7 +56,7 @@ class TestCSVDataNode:
         csv_dn_config = Config.configure_csv_data_node(
             id="foo_bar", default_path=default_path, has_header=False, name="super name"
         )
-        dn = _DataManagerFactory._build_manager()._create_and_set(csv_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(csv_dn_config, None, None)
         assert isinstance(dn, CSVDataNode)
         assert dn.storage_type() == "csv"
         assert dn.config_id == "foo_bar"
@@ -74,7 +74,7 @@ class TestCSVDataNode:
         csv_dn_config = Config.configure_csv_data_node(
             id="foo", default_path=default_path, has_header=True, exposed_type=MyCustomObject
         )
-        dn = _DataManagerFactory._build_manager()._create_and_set(csv_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(csv_dn_config, None, None)
         assert dn.storage_type() == "csv"
         assert dn.config_id == "foo"
         assert dn.properties["has_header"] is True
@@ -136,6 +136,7 @@ class TestCSVDataNode:
 
     def test_set_path(self):
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.csv"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.path == "foo.csv"
         dn.path = "bar.csv"
         assert dn.path == "bar.csv"
@@ -144,6 +145,7 @@ class TestCSVDataNode:
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.csv")
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         read_data = dn.read()
         assert read_data is not None
         dn.path = new_path
@@ -218,6 +220,7 @@ class TestCSVDataNode:
         temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.csv"))
         pd.DataFrame([]).to_csv(temp_file_path)
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         dn.write(pd.DataFrame([1, 2, 3]))
         previous_edit_date = dn.last_edit_date
@@ -291,6 +294,7 @@ class TestCSVDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -308,6 +312,7 @@ class TestCSVDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         upload_content = pd.read_csv(csv_file)
         dn.lock_edit("editor_id_1")
@@ -339,6 +344,7 @@ class TestCSVDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -389,6 +395,7 @@ class TestCSVDataNode:
         pd.DataFrame(new_data).to_csv(new_csv_path, index=False)
 
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "numpy"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 

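The second recurring change in this file: nodes constructed directly (rather than through the manager) are saved before any write(), lock_edit(), or upload, presumably because edit tracking now updates the stored entity instead of writing through the removed setter. A minimal sketch, assuming a temporary CSV path:

    dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "temp.csv", "exposed_type": "pandas"})
    _DataManagerFactory._build_manager()._repository._save(dn)  # register before editing

    dn.write(pd.DataFrame([1, 2, 3]))  # edit metadata lands on the stored entity
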
+ 42 - 42
tests/core/data/test_data_manager.py

@@ -35,12 +35,12 @@ def file_exists(file_path: str) -> bool:
 class TestDataManager:
     def test_create_data_node_and_modify_properties_does_not_modify_config(self):
         dn_config = Config.configure_data_node(id="name", foo="bar")
-        dn = _DataManager._create_and_set(dn_config, None, None)
+        dn = _DataManager._create(dn_config, None, None)
         assert dn_config.properties.get("foo") == "bar"
         assert dn_config.properties.get("baz") is None
 
         dn.properties["baz"] = "qux"
-        _DataManager._set(dn)
+        _DataManager._update(dn)
         assert dn_config.properties.get("foo") == "bar"
         assert dn_config.properties.get("baz") is None
         assert dn.properties.get("foo") == "bar"
@@ -75,7 +75,7 @@ class TestDataManager:
 
     def test_create_data_node_with_name_provided(self):
         dn_config = Config.configure_data_node(id="dn", foo="bar", name="acb")
-        dn = _DataManager._create_and_set(dn_config, None, None)
+        dn = _DataManager._create(dn_config, None, None)
         assert dn.name == "acb"
 
     def test_create_and_get_csv_data_node(self):
@@ -84,7 +84,7 @@ class TestDataManager:
         # - a default scenario scope
         # - No owner_id
         csv_dn_config = Config.configure_data_node(id="foo", storage_type="csv", path="bar", has_header=True)
-        csv_dn = _DataManager._create_and_set(csv_dn_config, None, None)
+        csv_dn = _DataManager._create(csv_dn_config, None, None)
 
         assert isinstance(csv_dn, CSVDataNode)
         assert isinstance(_DataManager._get(csv_dn.id), CSVDataNode)
@@ -148,7 +148,7 @@ class TestDataManager:
 
     def test_edit_and_get_data_node(self):
         config = Config.configure_pickle_data_node(id="foo")
-        dn = _DataManager._create_and_set(config, None, None)
+        dn = _DataManager._create(config, None, None)
 
         assert _DataManager._get(dn.id).last_edit_date is None
         assert len(_DataManager._get(dn.id).properties) == 2  # is_generated and path
@@ -187,7 +187,7 @@ class TestDataManager:
         in_memory_dn_config = Config.configure_data_node(
             id="baz", storage_type="in_memory", scope=Scope.SCENARIO, default_data="qux", other_data="foo"
         )
-        in_mem_dn = _DataManager._create_and_set(in_memory_dn_config, "Scenario_id", {"task_id"})
+        in_mem_dn = _DataManager._create(in_memory_dn_config, "Scenario_id", {"task_id"})
 
         assert isinstance(in_mem_dn, InMemoryDataNode)
         assert isinstance(_DataManager._get(in_mem_dn.id), InMemoryDataNode)
@@ -240,7 +240,7 @@ class TestDataManager:
         # - No owner id
         # - no default data
         dn_config = Config.configure_data_node(id="plop", storage_type="pickle", scope=Scope.CYCLE)
-        pickle_dn = _DataManager._create_and_set(dn_config, None, {"task_id_1", "task_id_2"})
+        pickle_dn = _DataManager._create(dn_config, None, {"task_id_1", "task_id_2"})
 
         assert isinstance(pickle_dn, PickleDataNode)
         assert isinstance(_DataManager._get(pickle_dn.id), PickleDataNode)
@@ -287,26 +287,26 @@ class TestDataManager:
     def test_create_raises_exception_with_wrong_type(self):
         wrong_type_dn_config = DataNodeConfig(id="foo", storage_type="bar", scope=DataNodeConfig._DEFAULT_SCOPE)
         with pytest.raises(InvalidDataNodeType):
-            _DataManager._create_and_set(wrong_type_dn_config, None, None)
+            _DataManager._create(wrong_type_dn_config, None, None)
 
     def test_create_from_same_config_generates_new_data_node_and_new_id(self):
         dn_config = Config.configure_data_node(id="foo", storage_type="in_memory")
-        dn = _DataManager._create_and_set(dn_config, None, None)
-        dn_2 = _DataManager._create_and_set(dn_config, None, None)
+        dn = _DataManager._create(dn_config, None, None)
+        dn_2 = _DataManager._create(dn_config, None, None)
         assert dn_2.id != dn.id
 
     def test_create_uses_overridden_attributes_in_config_file(self):
         Config.override(os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/config.toml"))
 
         csv_dn_cfg = Config.configure_data_node(id="foo", storage_type="csv", path="bar", has_header=True)
-        csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None)
+        csv_dn = _DataManager._create(csv_dn_cfg, None, None)
         assert csv_dn.config_id == "foo"
         assert isinstance(csv_dn, CSVDataNode)
         assert csv_dn._path == "path_from_config_file"
         assert csv_dn.properties["has_header"]
 
         csv_dn_cfg = Config.configure_data_node(id="baz", storage_type="csv", path="bar", has_header=True)
-        csv_dn = _DataManager._create_and_set(csv_dn_cfg, None, None)
+        csv_dn = _DataManager._create(csv_dn_cfg, None, None)
         assert csv_dn.config_id == "baz"
         assert isinstance(csv_dn, CSVDataNode)
         assert csv_dn._path == "bar"
@@ -319,11 +319,11 @@ class TestDataManager:
     def test_get_all(self):
         assert len(_DataManager._get_all()) == 0
         dn_config_1 = Config.configure_data_node(id="foo", storage_type="in_memory")
-        _DataManager._create_and_set(dn_config_1, None, None)
+        _DataManager._create(dn_config_1, None, None)
         assert len(_DataManager._get_all()) == 1
         dn_config_2 = Config.configure_data_node(id="baz", storage_type="in_memory")
-        _DataManager._create_and_set(dn_config_2, None, None)
-        _DataManager._create_and_set(dn_config_2, None, None)
+        _DataManager._create(dn_config_2, None, None)
+        _DataManager._create(dn_config_2, None, None)
         assert len(_DataManager._get_all()) == 3
         assert len([dn for dn in _DataManager._get_all() if dn.config_id == "foo"]) == 1
         assert len([dn for dn in _DataManager._get_all() if dn.config_id == "baz"]) == 2
@@ -334,7 +334,7 @@ class TestDataManager:
         # Only version 2.0 has the data node with config_id = "config_id_6"
         for version in range(1, 3):
             for i in range(5):
-                _DataManager._set(
+                _DataManager._repository._save(
                     InMemoryDataNode(
                         f"config_id_{i + version}",
                         Scope.SCENARIO,
@@ -363,7 +363,7 @@ class TestDataManager:
         assert len(_DataManager._get_all_by(filters=[{"version": "2.0", "config_id": "config_id_1"}])) == 0
         assert len(_DataManager._get_all_by(filters=[{"version": "2.0", "config_id": "config_id_6"}])) == 1
 
-    def test_set(self):
+    def test_save_and_update(self):
         dn = InMemoryDataNode(
             "config_id",
             Scope.SCENARIO,
@@ -377,26 +377,26 @@ class TestDataManager:
         )
         assert len(_DataManager._get_all()) == 0
         assert not _DataManager._exists(dn.id)
-        _DataManager._set(dn)
+        _DataManager._repository._save(dn)
         assert len(_DataManager._get_all()) == 1
         assert _DataManager._exists(dn.id)
 
         # changing data node attribute
         dn._config_id = "foo"
         assert dn.config_id == "foo"
-        _DataManager._set(dn)
+        _DataManager._update(dn)
         assert len(_DataManager._get_all()) == 1
         assert dn.config_id == "foo"
         assert _DataManager._get(dn.id).config_id == "foo"
 
     def test_delete(self):
-        dn_1 = InMemoryDataNode("config_id", Scope.SCENARIO, id="id_1")
-        dn_2 = InMemoryDataNode("config_id", Scope.SCENARIO, id="id_2")
-        dn_3 = InMemoryDataNode("config_id", Scope.SCENARIO, id="id_3")
+        dn_1 = InMemoryDataNode("config_id", Scope.SCENARIO, id=DataNodeId("id_1"))
+        dn_2 = InMemoryDataNode("config_id", Scope.SCENARIO, id=DataNodeId("id_2"))
+        dn_3 = InMemoryDataNode("config_id", Scope.SCENARIO, id=DataNodeId("id_3"))
         assert len(_DataManager._get_all()) == 0
-        _DataManager._set(dn_1)
-        _DataManager._set(dn_2)
-        _DataManager._set(dn_3)
+        _DataManager._repository._save(dn_1)
+        _DataManager._repository._save(dn_2)
+        _DataManager._repository._save(dn_3)
         assert len(_DataManager._get_all()) == 3
         assert all(_DataManager._exists(dn.id) for dn in [dn_1, dn_2, dn_3])
         _DataManager._delete(dn_1.id)
@@ -684,16 +684,16 @@ class TestDataManager:
         dn_config_2 = Config.configure_data_node("dn_2", scope=Scope.SCENARIO)
         dn_config_3 = Config.configure_data_node("dn_3", scope=Scope.SCENARIO)
 
-        dn_1_1 = _DataManager._create_and_set(dn_config_1, None, None)
-        dn_1_2 = _DataManager._create_and_set(dn_config_1, None, None)
-        dn_1_3 = _DataManager._create_and_set(dn_config_1, None, None)
+        dn_1_1 = _DataManager._create(dn_config_1, None, None)
+        dn_1_2 = _DataManager._create(dn_config_1, None, None)
+        dn_1_3 = _DataManager._create(dn_config_1, None, None)
         assert len(_DataManager._get_all()) == 3
 
-        dn_2_1 = _DataManager._create_and_set(dn_config_2, None, None)
-        dn_2_2 = _DataManager._create_and_set(dn_config_2, None, None)
+        dn_2_1 = _DataManager._create(dn_config_2, None, None)
+        dn_2_2 = _DataManager._create(dn_config_2, None, None)
         assert len(_DataManager._get_all()) == 5
 
-        dn_3_1 = _DataManager._create_and_set(dn_config_3, None, None)
+        dn_3_1 = _DataManager._create(dn_config_3, None, None)
         assert len(_DataManager._get_all()) == 6
 
         dn_1_datanodes = _DataManager._get_by_config_id(dn_config_1.id)
@@ -713,28 +713,28 @@ class TestDataManager:
         dn_config_2 = Config.configure_data_node("dn_2", scope=Scope.SCENARIO)
 
         _VersionManager._set_experiment_version("1.0")
-        _DataManager._create_and_set(dn_config_1, None, None)
-        _DataManager._create_and_set(dn_config_1, None, None)
-        _DataManager._create_and_set(dn_config_1, None, None)
-        _DataManager._create_and_set(dn_config_2, None, None)
-        _DataManager._create_and_set(dn_config_2, None, None)
+        _DataManager._create(dn_config_1, None, None)
+        _DataManager._create(dn_config_1, None, None)
+        _DataManager._create(dn_config_1, None, None)
+        _DataManager._create(dn_config_2, None, None)
+        _DataManager._create(dn_config_2, None, None)
 
         assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3
         assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2
 
         _VersionManager._set_experiment_version("2.0")
-        _DataManager._create_and_set(dn_config_1, None, None)
-        _DataManager._create_and_set(dn_config_1, None, None)
-        _DataManager._create_and_set(dn_config_1, None, None)
-        _DataManager._create_and_set(dn_config_2, None, None)
-        _DataManager._create_and_set(dn_config_2, None, None)
+        _DataManager._create(dn_config_1, None, None)
+        _DataManager._create(dn_config_1, None, None)
+        _DataManager._create(dn_config_1, None, None)
+        _DataManager._create(dn_config_2, None, None)
+        _DataManager._create(dn_config_2, None, None)
 
         assert len(_DataManager._get_by_config_id(dn_config_1.id)) == 3
         assert len(_DataManager._get_by_config_id(dn_config_2.id)) == 2
 
     def test_can_duplicate(self):
         dn_config = Config.configure_data_node("dn_1")
-        dn = _DataManager._create_and_set(dn_config, None, None)
+        dn = _DataManager._create(dn_config, None, None)
 
         reasons = _DataManager._can_duplicate(dn.id)
         assert bool(reasons)

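The renamed test_save_and_update above captures the new contract: _repository._save() performs the initial persistence, while _update() pushes later changes to an entity that already exists. Condensed from the test:

    dn = InMemoryDataNode("config_id", Scope.SCENARIO)
    _DataManager._repository._save(dn)   # first persistence: the entity now exists

    dn._config_id = "foo"                # mutate a tracked attribute
    _DataManager._update(dn)             # push the change to the stored entity
    assert _DataManager._get(dn.id).config_id == "foo"
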
+ 36 - 25
tests/core/data/test_data_node.py

@@ -60,7 +60,7 @@ class TestDataNode:
         data_manager = _DataManagerFactory()._build_manager()
 
         dn_id = data_node.id
-        data_manager._set(data_node)
+        data_manager._repository._save(data_node)
 
         # # To test if instance is same type
         task = Task("task", {}, print, [], [], dn_id)
@@ -149,6 +149,7 @@ class TestDataNode:
 
     def test_read_write(self):
         dn = FakeDataNode("foo_bar")
+        _DataManagerFactory._build_manager()._repository._save(dn)
         with pytest.raises(NoData):
             assert dn.read() is None
             dn.read_or_raise()
@@ -197,6 +198,7 @@ class TestDataNode:
 
     def test_locked_dn_unlockable_only_by_same_editor(self):
         dn = InMemoryDataNode("dn", Scope.SCENARIO)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.lock_edit("user_1")
         assert dn.edit_in_progress
         assert dn._editor_id == "user_1"
@@ -212,6 +214,7 @@ class TestDataNode:
 
     def test_none_editor_can_lock_a_locked_dn(self):
         dn = InMemoryDataNode("dn", Scope.SCENARIO)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.lock_edit("user")
         assert dn.edit_in_progress
         assert dn._editor_id == "user"
@@ -223,6 +226,7 @@ class TestDataNode:
 
     def test_none_editor_can_unlock_a_locked_dn(self):
         dn = InMemoryDataNode("dn", Scope.SCENARIO)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.lock_edit("user")
         assert dn.edit_in_progress
         assert dn._editor_id == "user"
@@ -243,6 +247,7 @@ class TestDataNode:
 
     def test_ready_for_reading(self):
         dn = InMemoryDataNode("foo_bar", Scope.CYCLE)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.last_edit_date is None
         assert not dn.is_ready_for_reading
         assert dn.job_ids == []
@@ -268,8 +273,10 @@ class TestDataNode:
         assert dn.job_ids == [job_id]
 
     def test_is_valid_no_validity_period(self):
-        # Test Never been written
         dn = InMemoryDataNode("foo", Scope.SCENARIO, DataNodeId("id"), "name", "owner_id")
+        _DataManagerFactory._build_manager()._repository._save(dn)
+
+        # Test Never been written
         assert not dn.is_valid
 
         # test has been written
@@ -277,10 +284,12 @@ class TestDataNode:
         assert dn.is_valid
 
     def test_is_valid_with_30_min_validity_period(self):
-        # Test Never been written
         dn = InMemoryDataNode(
             "foo", Scope.SCENARIO, DataNodeId("id"), "name", "owner_id", validity_period=timedelta(minutes=30)
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
+
+        # Test Never been written
         assert dn.is_valid is False
 
         # Has been written less than 30 minutes ago
@@ -292,8 +301,10 @@ class TestDataNode:
         assert dn.is_valid is False
 
     def test_is_valid_with_5_days_validity_period(self):
-        # Test Never been written
         dn = InMemoryDataNode("foo", Scope.SCENARIO, validity_period=timedelta(days=5))
+        _DataManagerFactory._build_manager()._repository._save(dn)
+
+        # Test Never been written
         assert dn.is_valid is False
 
         # Has been written less than 30 minutes ago
@@ -302,7 +313,7 @@ class TestDataNode:
 
         # Has been written more than 30 minutes ago
         dn._last_edit_date = datetime.now() - timedelta(days=6)
-        _DataManager()._set(dn)
+        _DataManager()._repository._save(dn)
         assert dn.is_valid is False
 
     def test_is_up_to_date(self, current_datetime):
@@ -441,7 +452,7 @@ class TestDataNode:
     def test_data_node_update_after_writing(self):
         dn = FakeDataNode("foo")
 
-        _DataManager._set(dn)
+        _DataManager._repository._save(dn)
         assert not _DataManager._get(dn.id).is_ready_for_reading
         dn.write("Any data")
 
@@ -459,7 +470,7 @@ class TestDataNode:
 
         assert dn.validity_period is None
 
-    def test_auto_set_and_reload(self, current_datetime):
+    def test_auto_update_and_reload(self, current_datetime):
         dn_1 = InMemoryDataNode(
             "foo",
             scope=Scope.GLOBAL,
@@ -476,7 +487,7 @@ class TestDataNode:
         )
 
         dm = _DataManager()
-        dm._set(dn_1)
+        dm._repository._save(dn_1)
 
         dn_2 = dm._get(dn_1)
 
@@ -517,16 +528,16 @@ class TestDataNode:
         assert dn_1.parent_ids == set()
         assert dn_2.parent_ids == set()
         dn_1._parent_ids.update(["sc2"])
-        _DataManager._set(dn_1)
+        _DataManager._update(dn_1)
         assert dn_1.parent_ids == {"sc2"}
         assert dn_2.parent_ids == {"sc2"}
         dn_2._parent_ids.clear()
         dn_2._parent_ids.update(["sc1"])
-        _DataManager._set(dn_2)
+        _DataManager._update(dn_2)
         assert dn_1.parent_ids == {"sc1"}
         assert dn_2.parent_ids == {"sc1"}
         dn_2._parent_ids.clear()
-        _DataManager._set(dn_2)
+        _DataManager._update(dn_2)
 
         # auto set & reload on edit_in_progress attribute
         assert not dn_2.edit_in_progress
@@ -596,11 +607,11 @@ class TestDataNode:
         assert not dn_1._is_in_context
         assert len(dn_1.job_ids) == 1
 
-    def test_auto_set_and_reload_properties(self):
+    def test_auto_update_and_reload_properties(self):
         dn_1 = InMemoryDataNode("foo", scope=Scope.GLOBAL, properties={"name": "def"})
 
         dm = _DataManager()
-        dm._set(dn_1)
+        dm._repository._save(dn_1)
 
         dn_2 = dm._get(dn_1)
 
@@ -808,9 +819,9 @@ class TestDataNode:
     def test_locked_data_node_append_should_fail_with_wrong_editor(self):
         dn_config = Config.configure_csv_data_node("A")
         dn = _DataManager._bulk_get_or_create([dn_config])[dn_config]
-        first_line = pd.DataFrame(data={'col1': [1], 'col2': [3]})
-        second_line = pd.DataFrame(data={'col1': [2], 'col2': [4]})
-        data = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
+        first_line = pd.DataFrame(data={"col1": [1], "col2": [3]})
+        second_line = pd.DataFrame(data={"col1": [2], "col2": [4]})
+        data = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
         dn.write(first_line)
         assert first_line.equals(dn.read())
 
@@ -825,9 +836,9 @@ class TestDataNode:
     def test_locked_data_node_append_should_fail_before_expiration_date_and_succeed_after(self):
         dn_config = Config.configure_csv_data_node("A")
         dn = _DataManager._bulk_get_or_create([dn_config])[dn_config]
-        first_line = pd.DataFrame(data={'col1': [1], 'col2': [3]})
-        second_line = pd.DataFrame(data={'col1': [2], 'col2': [4]})
-        data = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
+        first_line = pd.DataFrame(data={"col1": [1], "col2": [3]})
+        second_line = pd.DataFrame(data={"col1": [2], "col2": [4]})
+        data = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
         dn.write(first_line)
         assert first_line.equals(dn.read())
 
@@ -857,7 +868,7 @@ class TestDataNode:
     def test_editor_fails_writing_a_data_node_locked_by_orchestrator(self):
         dn_config = Config.configure_data_node("A")
         dn = _DataManager._bulk_get_or_create([dn_config])[dn_config]
-        dn.lock_edit() # Locked by orchestrator
+        dn.lock_edit()  # Locked by orchestrator
 
         with pytest.raises(DataNodeIsBeingEdited):
             dn.write("data", editor_id="editor_1")
@@ -869,13 +880,13 @@ class TestDataNode:
     def test_editor_fails_appending_a_data_node_locked_by_orchestrator(self):
         dn_config = Config.configure_csv_data_node("A")
         dn = _DataManager._bulk_get_or_create([dn_config])[dn_config]
-        first_line = pd.DataFrame(data={'col1': [1], 'col2': [3]})
-        second_line = pd.DataFrame(data={'col1': [2], 'col2': [4]})
-        data = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
+        first_line = pd.DataFrame(data={"col1": [1], "col2": [3]})
+        second_line = pd.DataFrame(data={"col1": [2], "col2": [4]})
+        data = pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]})
         dn.write(first_line)
         assert first_line.equals(dn.read())
         dn = _DataManager._bulk_get_or_create([dn_config])[dn_config]
-        dn.lock_edit() # Locked by orchestrator
+        dn.lock_edit()  # Locked by orchestrator
 
         with pytest.raises(DataNodeIsBeingEdited):
             dn.append(second_line, editor_id="editor_1")
@@ -897,7 +908,7 @@ class TestDataNode:
         after = datetime.now()
         timestamp = datetime.now()
         data_node.track_edit(timestamp=timestamp)
-        _DataManagerFactory._build_manager()._set(data_node)
+        _DataManagerFactory._build_manager()._update(data_node)
         # To save the edits because track edit does not save the data node
 
         assert len(data_node.edits) == 6

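The comment added in the last hunk is the key detail: track_edit() only records the edit on the in-memory object, so the test follows it with _update() to persist the new entry. In short:

    data_node.track_edit(timestamp=timestamp)                # recorded in memory only
    _DataManagerFactory._build_manager()._update(data_node)  # persists the edit list
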
+ 13 - 5
tests/core/data/test_excel_data_node.py

@@ -84,7 +84,7 @@ class TestExcelDataNode:
         excel_dn_config = Config.configure_excel_data_node(
             id="foo_bar", default_path=path, has_header=False, sheet_name="Sheet1", name="super name"
         )
-        dn = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(excel_dn_config, None, None)
         assert isinstance(dn, ExcelDataNode)
         assert dn.storage_type() == "excel"
         assert dn.config_id == "foo_bar"
@@ -103,7 +103,7 @@ class TestExcelDataNode:
         excel_dn_config_1 = Config.configure_excel_data_node(
             id="baz", default_path=path, has_header=True, sheet_name="Sheet1", exposed_type=MyCustomObject
         )
-        dn_1 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_1, None, None)
+        dn_1 = _DataManagerFactory._build_manager()._create(excel_dn_config_1, None, None)
         assert isinstance(dn_1, ExcelDataNode)
         assert dn_1.properties["has_header"] is True
         assert dn_1.properties["sheet_name"] == "Sheet1"
@@ -116,7 +116,7 @@ class TestExcelDataNode:
             sheet_name=sheet_names,
             exposed_type={"Sheet1": "pandas", "Sheet2": "numpy"},
         )
-        dn_2 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_2, None, None)
+        dn_2 = _DataManagerFactory._build_manager()._create(excel_dn_config_2, None, None)
         assert isinstance(dn_2, ExcelDataNode)
         assert dn_2.properties["sheet_name"] == sheet_names
         assert dn_2.properties["exposed_type"] == {"Sheet1": "pandas", "Sheet2": "numpy"}
@@ -124,7 +124,7 @@ class TestExcelDataNode:
         excel_dn_config_3 = Config.configure_excel_data_node(
             id="baz", default_path=path, has_header=True, sheet_name=sheet_names, exposed_type=MyCustomObject
         )
-        dn_3 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_3, None, None)
+        dn_3 = _DataManagerFactory._build_manager()._create(excel_dn_config_3, None, None)
         assert isinstance(dn_3, ExcelDataNode)
         assert dn_3.properties["sheet_name"] == sheet_names
         assert dn_3.properties["exposed_type"] == MyCustomObject
@@ -136,7 +136,7 @@ class TestExcelDataNode:
             sheet_name=sheet_names,
             exposed_type={"Sheet1": MyCustomObject, "Sheet2": MyCustomObject2},
         )
-        dn_4 = _DataManagerFactory._build_manager()._create_and_set(excel_dn_config_4, None, None)
+        dn_4 = _DataManagerFactory._build_manager()._create(excel_dn_config_4, None, None)
         assert isinstance(dn_4, ExcelDataNode)
         assert dn_4.properties["sheet_name"] == sheet_names
         assert dn_4.properties["exposed_type"] == {"Sheet1": MyCustomObject, "Sheet2": MyCustomObject2}
@@ -172,6 +172,7 @@ class TestExcelDataNode:
 
     def test_set_path(self):
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.xlsx"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.path == "foo.xlsx"
         dn.path = "bar.xlsx"
         assert dn.path == "bar.xlsx"
@@ -192,6 +193,7 @@ class TestExcelDataNode:
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
         new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.xlsx")
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"default_path": path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         read_data = dn.read()
         assert read_data is not None
         dn.path = new_path
@@ -207,6 +209,7 @@ class TestExcelDataNode:
             pathlib.Path(__file__).parent.resolve(), "data_sample/example_2.xlsx"
         )  # ["Sheet1", "Sheet2", "Sheet3"]
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"default_path": path, "exposed_type": MyCustomObject1})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.properties["exposed_type"] == MyCustomObject1
         dn.read()
         dn.path = new_path
@@ -253,6 +256,7 @@ class TestExcelDataNode:
             Scope.SCENARIO,
             properties={"default_path": path_1, "exposed_type": [MyCustomObject1, MyCustomObject2]},
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
         data = dn.read()
         assert isinstance(data, Dict)
         assert isinstance(data["Sheet1"][0], MyCustomObject1)
@@ -475,6 +479,7 @@ class TestExcelDataNode:
         temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.xlsx"))
         pd.DataFrame([]).to_excel(temp_file_path)
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         dn.write(pd.DataFrame([1, 2, 3]))
         previous_edit_date = dn.last_edit_date
@@ -543,6 +548,7 @@ class TestExcelDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": old_xlsx_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -560,6 +566,7 @@ class TestExcelDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": old_xlsx_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -613,6 +620,7 @@ class TestExcelDataNode:
         pd.DataFrame(new_data).to_excel(new_excel_path, index=False)
 
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": old_excel_path, "exposed_type": "numpy"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 

+ 4 - 4
tests/core/data/test_file_datanode_mixin.py

@@ -25,10 +25,12 @@ def test_duplicate_data():
     copy = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
     copy_2 = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
     copy_copy = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
-    _DataManager._set(src)
+    _DataManager._repository._save(src)
+    _DataManager._repository._save(copy)
+    _DataManager._repository._save(copy_2)
+    _DataManager._repository._save(copy_copy)
 
     src._duplicate_file(copy)
-    _DataManager._set(copy)
     assert _normalize_path(src.path) == _normalize_path(path)
     assert _normalize_path(src.path) != _normalize_path(copy.path)
     assert filecmp.cmp(path, copy.path)
@@ -36,12 +38,10 @@ def test_duplicate_data():
     assert copy.path.count("DUPLICATE_OF") == 1
 
     src._duplicate_file(copy_2)
-    _DataManager._set(copy_2)
     assert _normalize_path(copy.path) != _normalize_path(copy_2.path)
     assert copy_2.path.count("DUPLICATE_OF") == 1
 
     copy._duplicate_file(copy_copy)
-    _DataManager._set(copy_copy)
     assert copy_copy.path.count("DUPLICATE_OF") == 2
 
     os.unlink(copy.path)

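With the setter gone, _duplicate_file() no longer needs a follow-up _set(): every node involved is saved up front, and the DUPLICATE_OF path the mixin assigns to the copy is persisted without an extra call, presumably through the same auto-update path. Condensed from the test:

    _DataManager._repository._save(src)
    _DataManager._repository._save(copy)

    src._duplicate_file(copy)                      # assigns the DUPLICATE_OF path on copy
    assert copy.path.count("DUPLICATE_OF") == 1    # no _DataManager._set(copy) needed
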
+ 3 - 0
tests/core/data/test_filter_csv_data_node.py

@@ -18,6 +18,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.csv import CSVDataNode
 from taipy.core.data.operator import JoinOperator, Operator
 
@@ -32,6 +33,7 @@ def cleanup():
 
 def test_filter_pandas_exposed_type(csv_file):
     dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "pandas"})
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         [
             {"foo": 1, "bar": 1},
@@ -82,6 +84,7 @@ def test_filter_pandas_exposed_type(csv_file):
 
 def test_filter_numpy_exposed_type(csv_file):
     dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "numpy"})
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         np.array(
             [

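The same two-line registration step repeats in every filter test below (the Excel, Parquet, and SQL-table variants as well): import the factory, then save the freshly built node before writing the data to be filtered. Shown once here:

    from taipy.core.data._data_manager_factory import _DataManagerFactory

    dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "pandas"})
    _DataManagerFactory._build_manager()._repository._save(dn)  # register before dn.write(...)
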
+ 2 - 0
tests/core/data/test_filter_data_node.py

@@ -15,6 +15,7 @@ import numpy as np
 import pandas as pd
 import pytest
 
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.operator import JoinOperator, Operator
 
 from .utils import (
@@ -31,6 +32,7 @@ from .utils import (
 
 def test_filter_pandas_exposed_type(default_data_frame):
     dn = FakeDataNode("fake_dn")
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write("Any data")
 
     with pytest.raises(NotImplementedError):

+ 7 - 0
tests/core/data/test_filter_excel_data_node.py

@@ -18,6 +18,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.excel import ExcelDataNode
 from taipy.core.data.operator import JoinOperator, Operator
 
@@ -34,6 +35,7 @@ def test_filter_pandas_exposed_type_with_sheetname(excel_file):
     dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "pandas"}
     )
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         [
             {"foo": 1, "bar": 1},
@@ -84,6 +86,7 @@ def test_filter_pandas_exposed_type_with_sheetname(excel_file):
 
 def test_filter_pandas_exposed_type_without_sheetname(excel_file):
     dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "exposed_type": "pandas"})
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         [
             {"foo": 1, "bar": 1},
@@ -110,6 +113,7 @@ def test_filter_pandas_exposed_type_multisheet(excel_file):
         Scope.SCENARIO,
         properties={"path": excel_file, "sheet_name": ["sheet_1", "sheet_2"], "exposed_type": "pandas"},
     )
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         {
             "sheet_1": pd.DataFrame(
@@ -161,6 +165,7 @@ def test_filter_numpy_exposed_type_with_sheetname(excel_file):
     dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "numpy"}
     )
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         [
             [1, 1],
@@ -198,6 +203,7 @@ def test_filter_numpy_exposed_type_with_sheetname(excel_file):
 
 def test_filter_numpy_exposed_type_without_sheetname(excel_file):
     dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "exposed_type": "numpy"})
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         [
             [1, 1],
@@ -227,6 +233,7 @@ def test_filter_numpy_exposed_type_multisheet(excel_file):
         Scope.SCENARIO,
         properties={"path": excel_file, "sheet_name": ["sheet_1", "sheet_2"], "exposed_type": "numpy"},
     )
+    _DataManagerFactory._build_manager()._repository._save(dn)
     dn.write(
         {
             "sheet_1": pd.DataFrame(

+ 3 - 0
tests/core/data/test_filter_parquet_data_node.py

@@ -19,6 +19,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.operator import JoinOperator, Operator
 from taipy.core.data.parquet import ParquetDataNode
 
@@ -55,6 +56,7 @@ class TestFilterParquetDataNode:
 
     def test_filter_pandas_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(
             [
                 {"foo": 1, "bar": 1},
@@ -104,6 +106,7 @@ class TestFilterParquetDataNode:
 
     def test_filter_numpy_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "numpy"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(
             [
                 [1, 1],

+ 3 - 0
tests/core/data/test_filter_sql_table_data_node.py

@@ -16,6 +16,7 @@ import pandas as pd
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.operator import JoinOperator, Operator
 from taipy.core.data.sql_table import SQLTableDataNode
 
@@ -40,6 +41,7 @@ class TestFilterSQLTableDataNode:
             "exposed_type": "pandas",
         }
         dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(
             pd.DataFrame(
                 [
@@ -95,6 +97,7 @@ class TestFilterSQLTableDataNode:
             "exposed_type": "numpy",
         }
         dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(
             pd.DataFrame(
                 [

+ 11 - 5
tests/core/data/test_generic_data_node.py

@@ -59,7 +59,7 @@ class TestGenericDataNode:
         generic_dn_config = Config.configure_generic_data_node(
             id="foo_bar", read_fct=read_fct, write_fct=write_fct, name="super name"
         )
-        dn = data_manager._create_and_set(generic_dn_config, None, None)
+        dn = data_manager._create(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert dn.storage_type() == "generic"
         assert dn.config_id == "foo_bar"
@@ -79,7 +79,7 @@ class TestGenericDataNode:
     def test_create_with_read_fct_and_none_write_fct(self):
         data_manager = _DataManagerFactory._build_manager()
         generic_dn_config = Config.configure_generic_data_node(id="foo", read_fct=read_fct, write_fct=None, name="foo")
-        dn = data_manager._create_and_set(generic_dn_config, None, None)
+        dn = data_manager._create(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert dn.storage_type() == "generic"
         assert dn.config_id == "foo"
@@ -96,7 +96,7 @@ class TestGenericDataNode:
     def test_create_with_write_fct_and_none_read_fct(self):
         data_manager = _DataManagerFactory._build_manager()
         generic_dn_config = Config.configure_generic_data_node(id="xyz", read_fct=None, write_fct=write_fct, name="xyz")
-        dn = data_manager._create_and_set(generic_dn_config, None, None)
+        dn = data_manager._create(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert dn.storage_type() == "generic"
         assert dn.config_id == "xyz"
@@ -113,7 +113,7 @@ class TestGenericDataNode:
     def test_create_with_read_fct(self):
         data_manager = _DataManagerFactory._build_manager()
         generic_dn_config = Config.configure_generic_data_node(id="acb", read_fct=read_fct, name="acb")
-        dn = data_manager._create_and_set(generic_dn_config, None, None)
+        dn = data_manager._create(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert dn.storage_type() == "generic"
         assert dn.config_id == "acb"
@@ -130,7 +130,7 @@ class TestGenericDataNode:
     def test_create_with_write_fct(self):
         data_manager = _DataManagerFactory._build_manager()
         generic_dn_config = Config.configure_generic_data_node(id="mno", write_fct=write_fct, name="mno")
-        dn = data_manager._create_and_set(generic_dn_config, None, None)
+        dn = data_manager._create(generic_dn_config, None, None)
         assert isinstance(dn, GenericDataNode)
         assert dn.storage_type() == "generic"
         assert dn.config_id == "mno"
@@ -168,6 +168,7 @@ class TestGenericDataNode:
 
     def test_read_write_generic_datanode(self):
         generic_dn = GenericDataNode("foo", Scope.SCENARIO, properties={"read_fct": read_fct, "write_fct": write_fct})
+        _DataManagerFactory._build_manager()._repository._save(generic_dn)
 
         assert generic_dn.read() == self.data
         assert len(generic_dn.read()) == 10
@@ -185,6 +186,7 @@ class TestGenericDataNode:
             generic_dn_1.write(self.data)
 
         generic_dn_2 = GenericDataNode("xyz", Scope.SCENARIO, properties={"read_fct": None, "write_fct": write_fct})
+        _DataManagerFactory._build_manager()._repository._save(generic_dn_2)
 
         generic_dn_2.write(self.data)
         assert len(self.data) == 12
@@ -193,6 +195,7 @@ class TestGenericDataNode:
             generic_dn_2.read()
 
         generic_dn_3 = GenericDataNode("bar", Scope.SCENARIO, properties={"read_fct": None, "write_fct": None})
+        _DataManagerFactory._build_manager()._repository._save(generic_dn_3)
 
         with pytest.raises(MissingReadFunction):
             generic_dn_3.read()
@@ -212,6 +215,7 @@ class TestGenericDataNode:
                 "write_fct_args": [2],
             },
         )
+        _DataManagerFactory._build_manager()._repository._save(generic_dn)
 
         assert all(a + 1 == b for a, b in zip(self.data, generic_dn.read()))
         assert len(generic_dn.read()) == 10
@@ -232,6 +236,7 @@ class TestGenericDataNode:
                 "write_fct_args": 2,
             },
         )
+        _DataManagerFactory._build_manager()._repository._save(generic_dn)
 
         assert all(a + 1 == b for a, b in zip(self.data, generic_dn.read()))
         assert len(generic_dn.read()) == 10
@@ -245,6 +250,7 @@ class TestGenericDataNode:
         generic_dn = GenericDataNode(
             "foo", Scope.SCENARIO, properties={"read_fct": read_fct_modify_data_node_name, "write_fct": write_fct}
         )
+        _DataManagerFactory._build_manager()._repository._save(generic_dn)
         generic_dn._properties["read_fct_args"] = (generic_dn.id, "bar")
         generic_dn.read()
         assert generic_dn.name == "bar"

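The last generic-node hunk is the clearest motivation for the added saves: the read function renames the node, and that rename can only reach the repository if the node is already stored. Condensed from the test (read_fct_modify_data_node_name is assumed to look the node up by id and rename it):

    generic_dn = GenericDataNode(
        "foo", Scope.SCENARIO,
        properties={"read_fct": read_fct_modify_data_node_name, "write_fct": write_fct},
    )
    _DataManagerFactory._build_manager()._repository._save(generic_dn)

    generic_dn._properties["read_fct_args"] = (generic_dn.id, "bar")
    generic_dn.read()                 # the read function renames the node...
    assert generic_dn.name == "bar"   # ...and the rename persists because it was saved
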
+ 4 - 2
tests/core/data/test_in_memory_data_node.py

@@ -25,7 +25,7 @@ class TestInMemoryDataNodeEntity:
         in_memory_dn_config = Config.configure_in_memory_data_node(
             id="foobar_bazy", default_data="In memory Data Node", name="my name"
         )
-        dn = _DataManagerFactory._build_manager()._create_and_set(in_memory_dn_config, "owner_id", None)
+        dn = _DataManagerFactory._build_manager()._create(in_memory_dn_config, "owner_id", None)
         assert isinstance(dn, InMemoryDataNode)
         assert dn.storage_type() == "in_memory"
         assert dn.config_id == "foobar_bazy"
@@ -38,7 +38,7 @@ class TestInMemoryDataNodeEntity:
         assert dn.read() == "In memory Data Node"
 
         in_memory_dn_config_2 = Config.configure_in_memory_data_node(id="foo")
-        dn_2 = _DataManagerFactory._build_manager()._create_and_set(in_memory_dn_config_2, None, None)
+        dn_2 = _DataManagerFactory._build_manager()._create(in_memory_dn_config_2, None, None)
         assert dn_2.last_edit_date is None
         assert not dn_2.is_ready_for_reading
 
@@ -51,10 +51,12 @@ class TestInMemoryDataNodeEntity:
 
     def test_read_and_write(self):
         no_data_dn = InMemoryDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"))
+        _DataManagerFactory._build_manager()._repository._save(no_data_dn)
         with pytest.raises(NoData):
             assert no_data_dn.read() is None
             no_data_dn.read_or_raise()
         in_mem_dn = InMemoryDataNode("foo", Scope.SCENARIO, properties={"default_data": "bar"})
+        _DataManagerFactory._build_manager()._repository._save(in_mem_dn)
         assert isinstance(in_mem_dn.read(), str)
         assert in_mem_dn.read() == "bar"
         in_mem_dn.properties["default_data"] = "baz"  # this modifies the default data value but not the data itself

+ 19 - 3
tests/core/data/test_json_data_node.py

@@ -93,7 +93,7 @@ class TestJSONDataNode:
     def test_create(self):
         path = "data/node/path"
         json_dn_config = Config.configure_json_data_node(id="foo_bar", default_path=path, name="super name")
-        dn_1 = _DataManagerFactory._build_manager()._create_and_set(json_dn_config, None, None)
+        dn_1 = _DataManagerFactory._build_manager()._create(json_dn_config, None, None)
         assert isinstance(dn_1, JSONDataNode)
         assert dn_1.storage_type() == "json"
         assert dn_1.config_id == "foo_bar"
@@ -107,7 +107,7 @@ class TestJSONDataNode:
         assert dn_1.path == path
 
         json_dn_config_2 = Config.configure_json_data_node(id="foo", default_path=path, encoding="utf-16")
-        dn_2 = _DataManagerFactory._build_manager()._create_and_set(json_dn_config_2, None, None)
+        dn_2 = _DataManagerFactory._build_manager()._create(json_dn_config_2, None, None)
         assert isinstance(dn_2, JSONDataNode)
         assert dn_2.storage_type() == "json"
         assert dn_2.properties["encoding"] == "utf-16"
@@ -115,7 +115,7 @@ class TestJSONDataNode:
         json_dn_config_3 = Config.configure_json_data_node(
             id="foo", default_path=path, encoder=MyCustomEncoder, decoder=MyCustomDecoder
         )
-        dn_3 = _DataManagerFactory._build_manager()._create_and_set(json_dn_config_3, None, None)
+        dn_3 = _DataManagerFactory._build_manager()._create(json_dn_config_3, None, None)
         assert isinstance(dn_3, JSONDataNode)
         assert dn_3.storage_type() == "json"
         assert dn_3.properties["encoder"] == MyCustomEncoder
@@ -198,6 +198,7 @@ class TestJSONDataNode:
 
     def test_append_to_list(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         original_data = json_dn.read()
 
         # Append a dictionary
@@ -212,6 +213,7 @@ class TestJSONDataNode:
 
     def test_append_to_a_dictionary(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         original_data = {"a": 1, "b": 2, "c": 3}
         json_dn.write(original_data)
 
@@ -227,6 +229,7 @@ class TestJSONDataNode:
 
     def test_write(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         data = {"a": 1, "b": 2, "c": 3}
         json_dn.write(data)
         assert np.array_equal(json_dn.read(), data)
@@ -235,9 +238,11 @@ class TestJSONDataNode:
         data = {"≥a": 1, "b": 2}
 
         utf8_dn = JSONDataNode("utf8_dn", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(utf8_dn)
         utf16_dn = JSONDataNode(
             "utf16_dn", Scope.SCENARIO, properties={"default_path": json_file, "encoding": "utf-16"}
         )
+        _DataManagerFactory._build_manager()._repository._save(utf16_dn)
 
         # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding
         utf8_dn.write(data)
@@ -259,6 +264,7 @@ class TestJSONDataNode:
 
     def test_write_date(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         now = datetime.datetime.now()
         data = {"date": now}
         json_dn.write(data)
@@ -267,6 +273,7 @@ class TestJSONDataNode:
 
     def test_write_enum(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         data = [MyEnum.A, MyEnum.B, MyEnum.C]
         json_dn.write(data)
         read_data = json_dn.read()
@@ -274,6 +281,7 @@ class TestJSONDataNode:
 
     def test_write_dataclass(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         json_dn.write(CustomDataclass(integer=1, string="foo"))
         read_data = json_dn.read()
         assert read_data.integer == 1
@@ -283,6 +291,7 @@ class TestJSONDataNode:
         json_dn = JSONDataNode(
             "foo", Scope.SCENARIO, properties={"default_path": json_file, "encoder": MyCustomEncoder}
         )
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         data = [MyCustomObject("1", 1, "abc"), 100]
         json_dn.write(data)
         read_data = json_dn.read()
@@ -298,6 +307,7 @@ class TestJSONDataNode:
             Scope.SCENARIO,
             properties={"default_path": json_file, "encoder": MyCustomEncoder, "decoder": MyCustomDecoder},
         )
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         data = [MyCustomObject("1", 1, "abc"), 100]
         json_dn.write(data)
         read_data = json_dn.read()
@@ -309,6 +319,7 @@ class TestJSONDataNode:
 
     def test_filter(self, json_file):
         json_dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": json_file})
+        _DataManagerFactory._build_manager()._repository._save(json_dn)
         json_dn.write(
             [
                 {"foo": 1, "bar": 1},
@@ -343,6 +354,7 @@ class TestJSONDataNode:
 
     def test_set_path(self):
         dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.json"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.path == "foo.json"
         dn.path = "bar.json"
         assert dn.path == "bar.json"
@@ -351,6 +363,7 @@ class TestJSONDataNode:
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/json/example_dict.json")
         new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.json")
         dn = JSONDataNode("foo", Scope.SCENARIO, properties={"default_path": path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         read_data = dn.read()
         assert read_data is not None
         dn.path = new_path
@@ -363,6 +376,7 @@ class TestJSONDataNode:
         temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.json"))
         pd.DataFrame([]).to_json(temp_file_path)
         dn = JSONDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         dn.write([1, 2, 3])
         previous_edit_date = dn.last_edit_date
@@ -431,6 +445,7 @@ class TestJSONDataNode:
         old_data = [{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}]
 
         dn = JSONDataNode("foo", Scope.SCENARIO, properties={"path": old_json_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -449,6 +464,7 @@ class TestJSONDataNode:
         old_data = [{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}]
 
         dn = JSONDataNode("foo", Scope.SCENARIO, properties={"path": old_json_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 

+ 7 - 1
tests/core/data/test_mongo_data_node.py

@@ -79,7 +79,7 @@ class TestMongoCollectionDataNode:
     @pytest.mark.parametrize("properties", __properties)
     def test_create(self, properties):
         mongo_dn_config = Config.configure_mongo_collection_data_node("foo_bar", **properties)
-        mongo_dn = _DataManagerFactory._build_manager()._create_and_set(mongo_dn_config, None, None)
+        mongo_dn = _DataManagerFactory._build_manager()._create(mongo_dn_config, None, None)
         assert isinstance(mongo_dn, MongoCollectionDataNode)
         assert mongo_dn.storage_type() == "mongo_collection"
         assert mongo_dn.config_id == "foo_bar"
@@ -201,6 +201,7 @@ class TestMongoCollectionDataNode:
             Scope.SCENARIO,
             properties=custom_properties,
         )
+        _DataManagerFactory._build_manager()._repository._save(mongo_dn)
         mongo_dn.write(data)
 
         with pytest.raises(TypeError):
@@ -217,6 +218,7 @@ class TestMongoCollectionDataNode:
     )
     def test_append(self, properties, data):
         mongo_dn = MongoCollectionDataNode("foo", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(mongo_dn)
         mongo_dn.append(data)
 
         original_data = [{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}]
@@ -236,6 +238,7 @@ class TestMongoCollectionDataNode:
     )
     def test_write(self, properties, data, written_data):
         mongo_dn = MongoCollectionDataNode("foo", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(mongo_dn)
         mongo_dn.write(data)
 
         read_objects = mongo_dn.read()
@@ -259,6 +262,7 @@ class TestMongoCollectionDataNode:
             Scope.SCENARIO,
             properties=properties,
         )
+        _DataManagerFactory._build_manager()._repository._save(mongo_dn)
         mongo_dn.write(data)
 
         assert len(mongo_dn.read()) == 0
@@ -277,6 +281,7 @@ class TestMongoCollectionDataNode:
         custom_properties = properties.copy()
         custom_properties["custom_document"] = CustomObjectWithCustomEncoder
         mongo_dn = MongoCollectionDataNode("foo", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(mongo_dn)
         data = [
             CustomObjectWithCustomEncoder("1", 1, "abc", datetime.now()),
             CustomObjectWithCustomEncoder("2", 2, "def", datetime.now()),
@@ -302,6 +307,7 @@ class TestMongoCollectionDataNode:
         custom_properties = properties.copy()
         custom_properties["custom_document"] = CustomObjectWithCustomEncoderDecoder
         mongo_dn = MongoCollectionDataNode("foo", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(mongo_dn)
         data = [
             CustomObjectWithCustomEncoderDecoder("1", 1, "abc", datetime.now()),
             CustomObjectWithCustomEncoderDecoder("2", 2, "def", datetime.now()),

+ 10 - 4
tests/core/data/test_parquet_data_node.py

@@ -75,7 +75,7 @@ class TestParquetDataNode:
         parquet_dn_config = Config.configure_parquet_data_node(
             id="foo_bar", default_path=path, compression=compression, name="super name"
         )
-        dn = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(parquet_dn_config, None, None)
         assert isinstance(dn, ParquetDataNode)
         assert dn.storage_type() == "parquet"
         assert dn.config_id == "foo_bar"
@@ -94,21 +94,21 @@ class TestParquetDataNode:
         parquet_dn_config_1 = Config.configure_parquet_data_node(
             id="bar", default_path=path, compression=compression, exposed_type=MyCustomObject
         )
-        dn_1 = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config_1, None, None)
+        dn_1 = _DataManagerFactory._build_manager()._create(parquet_dn_config_1, None, None)
         assert isinstance(dn_1, ParquetDataNode)
         assert dn_1.properties["exposed_type"] == MyCustomObject
 
         parquet_dn_config_2 = Config.configure_parquet_data_node(
             id="bar", default_path=path, compression=compression, exposed_type=np.ndarray
         )
-        dn_2 = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config_2, None, None)
+        dn_2 = _DataManagerFactory._build_manager()._create(parquet_dn_config_2, None, None)
         assert isinstance(dn_2, ParquetDataNode)
         assert dn_2.properties["exposed_type"] == np.ndarray
 
         parquet_dn_config_3 = Config.configure_parquet_data_node(
             id="bar", default_path=path, compression=compression, exposed_type=pd.DataFrame
         )
-        dn_3 = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config_3, None, None)
+        dn_3 = _DataManagerFactory._build_manager()._create(parquet_dn_config_3, None, None)
         assert isinstance(dn_3, ParquetDataNode)
         assert dn_3.properties["exposed_type"] == pd.DataFrame
 
@@ -172,6 +172,7 @@ class TestParquetDataNode:
 
     def test_set_path(self):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": "foo.parquet"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.path == "foo.parquet"
         dn.path = "bar.parquet"
         assert dn.path == "bar.parquet"
@@ -195,6 +196,7 @@ class TestParquetDataNode:
         temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
         pd.DataFrame([]).to_parquet(temp_file_path)
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         dn.write(pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}))
         previous_edit_date = dn.last_edit_date
@@ -219,6 +221,7 @@ class TestParquetDataNode:
         pd.DataFrame([]).to_parquet(temp_file_path)
 
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_folder_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         initial_edit_date = dn.last_edit_date
 
         # Sleep so that the file can be created successfully on Ubuntu
@@ -293,6 +296,7 @@ class TestParquetDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -310,6 +314,7 @@ class TestParquetDataNode:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -362,6 +367,7 @@ class TestParquetDataNode:
         pd.DataFrame(new_data, columns=["a", "b", "c"]).to_parquet(new_parquet_path, index=False)
 
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "numpy"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 

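The second recurring change is the rename of _create_and_set() to _create(): the manager builds the node from its config and persists it in a single call, so the old name was redundant. A sketch, assuming (as these tests suggest) that the two trailing None arguments stand for an owner id and parent ids:

    from taipy import Config  # top-level re-export assumed
    from taipy.core.data._data_manager_factory import _DataManagerFactory

    cfg = Config.configure_parquet_data_node(id="foo_bar", default_path="data.parquet")
    dn = _DataManagerFactory._build_manager()._create(cfg, None, None)  # builds and saves
    assert dn.config_id == "foo_bar"
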
+ 12 - 2
tests/core/data/test_pickle_data_node.py

@@ -51,14 +51,14 @@ class TestPickleDataNodeEntity:
 
     def test_create_with_manager(self, pickle_file_path):
         parquet_dn_config = Config.configure_pickle_data_node(id="baz", default_path=pickle_file_path)
-        parquet_dn = _DataManagerFactory._build_manager()._create_and_set(parquet_dn_config, None, None)
+        parquet_dn = _DataManagerFactory._build_manager()._create(parquet_dn_config, None, None)
         assert isinstance(parquet_dn, PickleDataNode)
 
     def test_create(self):
         pickle_dn_config = Config.configure_pickle_data_node(
             id="foobar_bazxyxea", default_path="Data", default_data="Data"
         )
-        dn = _DataManagerFactory._build_manager()._create_and_set(pickle_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(pickle_dn_config, None, None)
 
         assert isinstance(dn, PickleDataNode)
         assert dn.storage_type() == "pickle"
@@ -106,6 +106,7 @@ class TestPickleDataNodeEntity:
 
     def test_create_with_file_name(self):
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={"default_data": "bar", "path": "foo.FILE.p"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert os.path.isfile("foo.FILE.p")
         assert dn.read() == "bar"
         dn.write("qux")
@@ -115,10 +116,12 @@ class TestPickleDataNodeEntity:
 
     def test_read_and_write(self):
         no_data_dn = PickleDataNode("foo", Scope.SCENARIO)
+        _DataManagerFactory._build_manager()._repository._save(no_data_dn)
         with pytest.raises(NoData):
             assert no_data_dn.read() is None
             no_data_dn.read_or_raise()
         pickle_str = PickleDataNode("foo", Scope.SCENARIO, properties={"default_data": "bar"})
+        _DataManagerFactory._build_manager()._repository._save(pickle_str)
         assert isinstance(pickle_str.read(), str)
         assert pickle_str.read() == "bar"
         pickle_str.properties["default_data"] = "baz"  # this modifies the default data value but not the data itself
@@ -147,16 +150,19 @@ class TestPickleDataNodeEntity:
                 "path": "bar.FILE.p",
             },
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.path == "bar.FILE.p"
 
     def test_set_path(self):
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.p"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.path == "foo.p"
         dn.path = "bar.p"
         assert dn.path == "bar.p"
 
     def test_is_generated(self):
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert dn.is_generated
         dn.path = "bar.p"
         assert not dn.is_generated
@@ -165,6 +171,7 @@ class TestPickleDataNodeEntity:
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.p")
         new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.p")
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={"default_path": path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         read_data = dn.read()
         assert read_data is not None
         dn.path = new_path
@@ -177,6 +184,7 @@ class TestPickleDataNodeEntity:
         temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.pickle"))
         pd.DataFrame([]).to_pickle(temp_file_path)
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         dn.write(pd.DataFrame([1, 2, 3]))
         previous_edit_date = dn.last_edit_date
@@ -246,6 +254,7 @@ class TestPickleDataNodeEntity:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={"path": old_pickle_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 
@@ -263,6 +272,7 @@ class TestPickleDataNodeEntity:
         old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])
 
         dn = PickleDataNode("foo", Scope.SCENARIO, properties={"path": old_pickle_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(old_data)
         old_last_edit_date = dn.last_edit_date
 

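The pickle tests above also show the two read flavors side by side: on an empty node, read() degrades to returning None while read_or_raise() raises NoData. A condensed sketch (PickleDataNode import path assumed):

    import pytest

    from taipy import Scope
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.data.pickle import PickleDataNode  # import path assumed
    from taipy.core.exceptions.exceptions import NoData

    no_data_dn = PickleDataNode("foo", Scope.SCENARIO)
    _DataManagerFactory._build_manager()._repository._save(no_data_dn)
    assert no_data_dn.read() is None  # soft failure: returns None
    with pytest.raises(NoData):
        no_data_dn.read_or_raise()    # hard failure: raises NoData
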
+ 2 - 0
tests/core/data/test_read_parquet_data_node.py

@@ -18,6 +18,7 @@ import pandas as pd
 import pytest
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.parquet import ParquetDataNode
 from taipy.core.exceptions.exceptions import NoData
 
@@ -170,6 +171,7 @@ class TestReadParquetDataNode:
         dn = ParquetDataNode(
             "foo", Scope.SCENARIO, properties={"path": temp_file_path, "engine": engine, "read_kwargs": read_kwargs}
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         df = pd.read_csv(os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv"))
         dn.write(df)

+ 7 - 2
tests/core/data/test_sql_data_node.py

@@ -110,7 +110,7 @@ class TestSQLDataNode:
     @pytest.mark.parametrize("properties", __sql_properties)
     def test_create(self, properties):
         sql_dn_config = Config.configure_sql_data_node(id="foo_bar", **properties)
-        dn = _DataManagerFactory._build_manager()._create_and_set(sql_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(sql_dn_config, None, None)
         assert isinstance(dn, SQLDataNode)
         assert dn.storage_type() == "sql"
         assert dn.config_id == "foo_bar"
@@ -129,7 +129,7 @@ class TestSQLDataNode:
             append_query_builder=my_append_query_builder_with_pandas,
             exposed_type=MyCustomObject,
         )
-        dn_1 = _DataManagerFactory._build_manager()._create_and_set(sql_dn_config_1, None, None)
+        dn_1 = _DataManagerFactory._build_manager()._create(sql_dn_config_1, None, None)
         assert isinstance(dn, SQLDataNode)
         assert dn_1.properties["exposed_type"] == MyCustomObject
         assert dn_1.properties["append_query_builder"] == my_append_query_builder_with_pandas
@@ -199,6 +199,7 @@ class TestSQLDataNode:
         custom_properties = properties.copy()
         custom_properties.pop("db_extra_args")
         dn = SQLDataNode("foo_bar", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         with patch("sqlalchemy.engine.Engine.connect") as engine_mock:
             # mock connection execute
             dn.write(pd.DataFrame({"foo": [1, 2, 3], "bar": [4, 5, 6]}))
@@ -214,6 +215,7 @@ class TestSQLDataNode:
 
         custom_properties["write_query_builder"] = single_write_query_builder
         dn = SQLDataNode("foo_bar", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         with patch("sqlalchemy.engine.Engine.connect") as engine_mock:
             # mock connection execute
@@ -257,6 +259,7 @@ class TestSQLDataNode:
         }
 
         dn = SQLDataNode("sqlite_dn", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         original_data = pd.DataFrame([{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}])
         data = dn.read()
         assert_frame_equal(data, original_data)
@@ -292,6 +295,7 @@ class TestSQLDataNode:
             "exposed_type": "pandas",
         }
         dn = SQLDataNode("foo", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(
             pd.DataFrame(
                 [
@@ -348,6 +352,7 @@ class TestSQLDataNode:
             "exposed_type": "numpy",
         }
         dn = SQLDataNode("foo", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(
             pd.DataFrame(
                 [

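For SQL nodes, what write() and append() execute is delegated to the write_query_builder and append_query_builder properties: callables that receive the data being written and return the list of SQL statements to run, either as plain strings or, as in this file's my_append_query_builder_with_pandas, as (statement, parameters) pairs. A hypothetical builder in that shape:

    import pandas as pd

    def write_query_builder(data: pd.DataFrame) -> list:
        # Replace the table contents: one plain statement, then a
        # parameterized insert fed by the dataframe's records.
        return [
            "DELETE FROM foo",
            ("INSERT INTO foo VALUES (:foo, :bar)", data.to_dict("records")),
        ]
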
+ 3 - 2
tests/core/data/test_sql_table_data_node.py

@@ -88,7 +88,7 @@ class TestSQLTableDataNode:
     @pytest.mark.parametrize("properties", __sql_properties)
     def test_create(self, properties):
         sql_table_dn_config = Config.configure_sql_table_data_node("foo_bar", **properties)
-        dn = _DataManagerFactory._build_manager()._create_and_set(sql_table_dn_config, None, None)
+        dn = _DataManagerFactory._build_manager()._create(sql_table_dn_config, None, None)
         assert isinstance(dn, SQLTableDataNode)
         assert dn.storage_type() == "sql_table"
         assert dn.config_id == "foo_bar"
@@ -104,7 +104,7 @@ class TestSQLTableDataNode:
         sql_table_dn_config_1 = Config.configure_sql_table_data_node(
             "foo_bar", **properties, exposed_type=MyCustomObject
         )
-        dn_1 = _DataManagerFactory._build_manager()._create_and_set(sql_table_dn_config_1, None, None)
+        dn_1 = _DataManagerFactory._build_manager()._create(sql_table_dn_config_1, None, None)
         assert isinstance(dn_1, SQLTableDataNode)
         assert dn_1.properties["exposed_type"] == MyCustomObject
 
@@ -158,6 +158,7 @@ class TestSQLTableDataNode:
             Scope.SCENARIO,
             properties=properties,
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
         assert dn._engine is None
 

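The assert dn._engine is None check above pins down that constructing and saving a SQL table node opens no database connection; the SQLAlchemy engine is only created lazily when the node is first read or written. A sketch, assuming sqlite's required properties are db_name, db_engine and table_name:

    from taipy import Scope
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.data.sql_table import SQLTableDataNode

    properties = {"db_engine": "sqlite", "db_name": "test_db", "table_name": "example"}
    dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=properties)
    _DataManagerFactory._build_manager()._repository._save(dn)
    assert dn._engine is None  # no connection until the node is actually used
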
+ 11 - 0
tests/core/data/test_write_csv_data_node.py

@@ -19,6 +19,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.csv import CSVDataNode
 
 
@@ -60,6 +61,7 @@ class MyCustomObject:
 )
 def test_append(csv_file, default_data_frame, content):
     csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file})
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
     assert_frame_equal(csv_dn.read(), default_data_frame)
 
     csv_dn.append(content)
@@ -71,6 +73,7 @@ def test_append(csv_file, default_data_frame, content):
 
 def test_write_with_header_pandas(tmp_csv_file):
     csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": tmp_csv_file})
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
 
     df = pd.DataFrame([{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}])
     csv_dn.write(df)
@@ -89,6 +92,7 @@ def test_write_with_header_pandas(tmp_csv_file):
 
 def test_write_with_header_numpy(tmp_csv_file):
     csv_dn = CSVDataNode("bar", Scope.SCENARIO, properties={"path": tmp_csv_file, "exposed_type": "numpy"})
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
 
     arr = np.array([[1], [2], [3], [4], [5]])
     csv_dn.write(arr)
@@ -104,6 +108,7 @@ def test_write_with_header_numpy(tmp_csv_file):
 
 def test_write_with_header_custom_exposed_type(tmp_csv_file):
     csv_dn = CSVDataNode("bar", Scope.SCENARIO, properties={"path": tmp_csv_file, "exposed_type": MyCustomObject})
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
 
     data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     csv_dn.write(data)
@@ -115,6 +120,7 @@ def test_write_with_header_custom_exposed_type(tmp_csv_file):
 
 def test_write_without_header_pandas(tmp_csv_file):
     csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": tmp_csv_file, "has_header": False})
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
 
     df = pd.DataFrame([*zip([1, 2, 3], [4, 5, 6])])
     csv_dn.write(df)
@@ -135,6 +141,7 @@ def test_write_without_header_numpy(tmp_csv_file):
     csv_dn = CSVDataNode(
         "bar", Scope.SCENARIO, properties={"path": tmp_csv_file, "exposed_type": "numpy", "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
 
     arr = np.array([[1], [2], [3], [4], [5]])
     csv_dn.write(arr)
@@ -152,6 +159,7 @@ def test_write_without_header_custom_exposed_type(tmp_csv_file):
     csv_dn = CSVDataNode(
         "bar", Scope.SCENARIO, properties={"path": tmp_csv_file, "exposed_type": MyCustomObject, "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
 
     data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     csv_dn.write(data)
@@ -166,6 +174,8 @@ def test_write_with_different_encoding(csv_file):
 
     utf8_dn = CSVDataNode("utf8_dn", Scope.SCENARIO, properties={"default_path": csv_file})
     utf16_dn = CSVDataNode("utf16_dn", Scope.SCENARIO, properties={"default_path": csv_file, "encoding": "utf-16"})
+    _DataManagerFactory._build_manager()._repository._save(utf8_dn)
+    _DataManagerFactory._build_manager()._repository._save(utf16_dn)
 
     # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding
     utf8_dn.write(data)
@@ -185,6 +195,7 @@ def test_write_with_column_names(tmp_csv_file):
     columns = ["e", "f", "g"]
 
     csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": tmp_csv_file})
+    _DataManagerFactory._build_manager()._repository._save(csv_dn)
     csv_dn.write_with_column_names(data, columns)
     df = pd.DataFrame(data, columns=columns)
     assert pd.DataFrame.equals(df, csv_dn.read())

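The utf8_dn/utf16_dn pair above makes the encoding contract concrete: the encoding property is fixed per node, so bytes written through a utf-16 node cannot be read back through a utf-8 node pointed at the same file. A sketch with a hypothetical path:

    import pandas as pd

    from taipy import Scope
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.data.csv import CSVDataNode

    csv_file = "example.csv"  # hypothetical path
    utf8_dn = CSVDataNode("utf8_dn", Scope.SCENARIO, properties={"default_path": csv_file})
    utf16_dn = CSVDataNode(
        "utf16_dn", Scope.SCENARIO, properties={"default_path": csv_file, "encoding": "utf-16"}
    )
    for dn in (utf8_dn, utf16_dn):
        _DataManagerFactory._build_manager()._repository._save(dn)

    utf16_dn.write(pd.DataFrame({"text": ["hëllo", "wörld"]}))
    # utf8_dn.read() would now fail to decode the utf-16 bytes.
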
+ 16 - 0
tests/core/data/test_write_multiple_sheet_excel_data_node.py

@@ -19,6 +19,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.excel import ExcelDataNode
 from taipy.core.exceptions.exceptions import SheetNameLengthMismatch
 
@@ -62,6 +63,7 @@ sheet_names = ["Sheet1", "Sheet2"]
 
 def test_write_with_header_multiple_sheet_pandas_with_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": sheet_names})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df_1 = pd.DataFrame([{"a": 1, "b": 2, "c": 3}])
     df_2 = pd.DataFrame([{"a": 4, "b": 5, "c": 6}])
@@ -91,6 +93,7 @@ def test_write_with_header_multiple_sheet_pandas_with_sheet_name(tmp_excel_file)
 
 def test_write_with_header_multiple_sheet_pandas_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df_1 = pd.DataFrame([{"a": 1, "b": 2, "c": 3}])
     df_2 = pd.DataFrame([{"a": 4, "b": 5, "c": 6}])
@@ -122,6 +125,7 @@ def test_write_with_header_multiple_sheet_numpy_with_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": sheet_names, "exposed_type": "numpy"}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr_1 = np.array([[1], [2], [3]])
     arr_2 = np.array([[4], [5], [6]])
@@ -146,6 +150,7 @@ def test_write_with_header_multiple_sheet_numpy_with_sheet_name(tmp_excel_file):
 
 def test_write_with_header_multiple_sheet_numpy_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": "numpy"})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr_1 = np.array([[1], [2], [3]])
     arr_2 = np.array([[4], [5], [6]])
@@ -174,6 +179,7 @@ def test_write_with_header_multiple_sheet_custom_exposed_type_with_sheet_name(tm
         Scope.SCENARIO,
         properties={"path": tmp_excel_file, "sheet_name": sheet_names, "exposed_type": MyCustomObject},
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
     row_1 = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     row_2 = [MyCustomObject(0, 4, "hello"), MyCustomObject(1, 5, "abc"), MyCustomObject(2, 6, ".")]
     sheet_data = {"Sheet1": row_1, "Sheet2": row_2}
@@ -187,6 +193,7 @@ def test_write_with_header_multiple_sheet_custom_exposed_type_with_sheet_name(tm
 
 def test_write_with_header_multiple_sheet_custom_exposed_type_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": MyCustomObject})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     row_1 = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     row_2 = [MyCustomObject(0, 4, "hello"), MyCustomObject(1, 5, "abc"), MyCustomObject(2, 6, ".")]
@@ -203,6 +210,7 @@ def test_write_without_header_multiple_sheet_pandas_with_sheet_name(tmp_excel_fi
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": sheet_names, "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df_1 = pd.DataFrame([*zip([1, 2, 3])])
     df_2 = pd.DataFrame([*zip([4, 5, 6])])
@@ -232,6 +240,7 @@ def test_write_without_header_multiple_sheet_pandas_with_sheet_name(tmp_excel_fi
 
 def test_write_without_header_multiple_sheet_pandas_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "has_header": False})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df_1 = pd.DataFrame([*zip([1, 2, 3])])
     df_2 = pd.DataFrame([*zip([4, 5, 6])])
@@ -265,6 +274,7 @@ def test_write_without_header_multiple_sheet_numpy_with_sheet_name(tmp_excel_fil
         Scope.SCENARIO,
         properties={"path": tmp_excel_file, "sheet_name": sheet_names, "exposed_type": "numpy", "has_header": False},
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr_1 = np.array([[1], [2], [3]])
     arr_2 = np.array([[4], [5], [6]])
@@ -291,6 +301,7 @@ def test_write_without_header_multiple_sheet_numpy_without_sheet_name(tmp_excel_
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": "numpy", "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr_1 = np.array([[1], [2], [3]])
     arr_2 = np.array([[4], [5], [6]])
@@ -324,6 +335,7 @@ def test_write_without_header_multiple_sheet_custom_exposed_type_with_sheet_name
             "has_header": False,
         },
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     row_1 = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     row_2 = [MyCustomObject(0, 4, "hello"), MyCustomObject(1, 5, "abc"), MyCustomObject(2, 6, ".")]
@@ -340,6 +352,7 @@ def test_write_without_header_multiple_sheet_custom_exposed_type_without_sheet_n
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": MyCustomObject, "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     row_1 = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     row_2 = [MyCustomObject(0, 4, "hello"), MyCustomObject(1, 5, "abc"), MyCustomObject(2, 6, ".")]
@@ -354,6 +367,7 @@ def test_write_without_header_multiple_sheet_custom_exposed_type_without_sheet_n
 
 def test_write_empty_multiple_sheet_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     excel_dn.write(None)
     assert len(excel_dn.read()) == 1
@@ -370,6 +384,7 @@ def test_write_empty_multiple_sheet_without_sheet_name(tmp_excel_file):
 
 def test_write_empty_multiple_sheet_with_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": sheet_names})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     # Multisheet excel dn can only be written with a dictionary of data
     with pytest.raises(SheetNameLengthMismatch):
@@ -408,6 +423,7 @@ def test_append_pandas_multisheet(excel_file_with_multi_sheet, default_multi_she
     dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": excel_file_with_multi_sheet, "sheet_name": ["Sheet1", "Sheet2"]}
     )
+    _DataManagerFactory._build_manager()._repository._save(dn)
     assert_frame_equal(dn.read()["Sheet1"], default_multi_sheet_data_frame["Sheet1"])
     assert_frame_equal(dn.read()["Sheet2"], default_multi_sheet_data_frame["Sheet2"])
 

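As the SheetNameLengthMismatch test above enforces, a multisheet Excel node is written with a dictionary keyed by sheet name, with one entry per configured sheet. A sketch with a hypothetical path:

    import pandas as pd

    from taipy import Scope
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.data.excel import ExcelDataNode

    excel_dn = ExcelDataNode(
        "foo",
        Scope.SCENARIO,
        properties={"path": "multi.xlsx", "sheet_name": ["Sheet1", "Sheet2"]},  # hypothetical path
    )
    _DataManagerFactory._build_manager()._repository._save(excel_dn)
    excel_dn.write({
        "Sheet1": pd.DataFrame([{"a": 1, "b": 2, "c": 3}]),
        "Sheet2": pd.DataFrame([{"a": 4, "b": 5, "c": 6}]),
    })
    assert len(excel_dn.read()) == 2  # one dataframe per sheet
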
+ 11 - 0
tests/core/data/test_write_parquet_data_node.py

@@ -19,6 +19,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.parquet import ParquetDataNode
 
 
@@ -60,6 +61,7 @@ class TestWriteParquetDataNode:
         path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
         new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.parquet")
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "engine": engine})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         read_data = dn.read()
         assert read_data is not None
         dn.path = new_path
@@ -71,6 +73,7 @@ class TestWriteParquetDataNode:
     def test_write_pandas(self, tmpdir_factory):
         temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
         parquet_dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path})
+        _DataManagerFactory._build_manager()._repository._save(parquet_dn)
 
         df = pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])
         parquet_dn.write(df)
@@ -97,6 +100,7 @@ class TestWriteParquetDataNode:
         parquet_dn = ParquetDataNode(
             "foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "numpy"}
         )
+        _DataManagerFactory._build_manager()._repository._save(parquet_dn)
 
         arr = np.array([[1], [2], [3], [4], [5]])
         parquet_dn.write(arr)
@@ -114,6 +118,7 @@ class TestWriteParquetDataNode:
         parquet_dn = ParquetDataNode(
             "foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": MyCustomObject}
         )
+        _DataManagerFactory._build_manager()._repository._save(parquet_dn)
 
         data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
         parquet_dn.write(data)
@@ -139,6 +144,7 @@ class TestWriteParquetDataNode:
         dn = ParquetDataNode(
             "foo", Scope.SCENARIO, properties={"path": temp_file_path, "engine": engine, "compression": comp3}
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(df)
         df.to_parquet(path=temp_file_2_path, compression=comp3, engine=engine)
         with open(temp_file_2_path, "rb") as tf:
@@ -157,6 +163,7 @@ class TestWriteParquetDataNode:
                 "write_kwargs": {"compression": comp2},
             },
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(df)
         df.to_parquet(path=temp_file_2_path, compression=comp2, engine=engine)
         with open(temp_file_2_path, "rb") as tf:
@@ -175,6 +182,7 @@ class TestWriteParquetDataNode:
                 "write_kwargs": {"compression": comp2},
             },
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn._write_with_kwargs(df, compression=comp1)
         df.to_parquet(path=temp_file_2_path, compression=comp1, engine=engine)
         with open(temp_file_2_path, "rb") as tf:
@@ -190,6 +198,7 @@ class TestWriteParquetDataNode:
             Scope.SCENARIO,
             properties={"path": temp_file_path, "engine": engine, "read_kwargs": {"columns": cols2}},
         )
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert set(dn.read().columns) == set(cols2)
 
         # 1
@@ -206,6 +215,7 @@ class TestWriteParquetDataNode:
 
         write_kwargs = {"partition_cols": ["a", "b"]}
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_dir_path, "write_kwargs": write_kwargs})  # type: ignore
+        _DataManagerFactory._build_manager()._repository._save(dn)
         dn.write(default_data_frame)
 
         assert pathlib.Path(temp_dir_path).is_dir()
@@ -227,6 +237,7 @@ class TestWriteParquetDataNode:
     )
     def test_append_pandas(self, parquet_file_path, default_data_frame, content):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path})
+        _DataManagerFactory._build_manager()._repository._save(dn)
         assert_frame_equal(dn.read(), default_data_frame)
 
         dn.append(content)

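The three compression hunks above suggest a precedence order for Parquet write options: keyword arguments passed to _write_with_kwargs() override the node's write_kwargs property, which in turn appears to take precedence over the top-level compression property. A sketch (hypothetical path; the codecs are standard pandas options):

    import pandas as pd

    from taipy import Scope
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.data.parquet import ParquetDataNode

    df = pd.DataFrame([{"a": 1, "b": 2}])
    dn = ParquetDataNode(
        "foo",
        Scope.SCENARIO,
        properties={
            "path": "temp.parquet",                   # hypothetical path
            "compression": "snappy",                  # lowest precedence
            "write_kwargs": {"compression": "gzip"},  # overrides "compression"
        },
    )
    _DataManagerFactory._build_manager()._repository._save(dn)
    dn.write(df)                                      # written with gzip
    dn._write_with_kwargs(df, compression="snappy")   # call-site kwargs win
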
+ 18 - 0
tests/core/data/test_write_single_sheet_excel_data_node.py

@@ -18,6 +18,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.excel import ExcelDataNode
 from taipy.core.exceptions.exceptions import SheetNameLengthMismatch
 
@@ -58,6 +59,7 @@ class MyCustomObject:
 
 def test_write_with_header_single_sheet_pandas_with_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": "Sheet1"})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df = pd.DataFrame([{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}])
 
@@ -80,6 +82,7 @@ def test_write_with_header_single_sheet_pandas_with_sheet_name(tmp_excel_file):
 
 def test_write_with_header_single_sheet_pandas_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df = pd.DataFrame([{"a": 1, "b": 2, "c": 3}, {"a": 4, "b": 5, "c": 6}])
 
@@ -115,6 +118,7 @@ def test_write_without_header_single_sheet_pandas_with_sheet_name(tmp_excel_file
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": "Sheet1", "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df = pd.DataFrame([*zip([1, 2, 3], [4, 5, 6])])
 
@@ -137,6 +141,7 @@ def test_write_without_header_single_sheet_pandas_with_sheet_name(tmp_excel_file
 
 def test_write_without_header_single_sheet_pandas_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "has_header": False})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     df = pd.DataFrame([*zip([1, 2, 3], [4, 5, 6])])
 
@@ -171,6 +176,7 @@ def test_write_with_header_single_sheet_numpy_with_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "sheet_name": "Sheet1", "exposed_type": "numpy"}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr = np.array([[1], [2], [3], [4], [5]])
     excel_dn.write(arr)
@@ -186,6 +192,7 @@ def test_write_with_header_single_sheet_numpy_with_sheet_name(tmp_excel_file):
 
 def test_write_with_header_single_sheet_numpy_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": "numpy"})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr = np.array([[1], [2], [3], [4], [5]])
     excel_dn.write(arr)
@@ -211,6 +218,7 @@ def test_write_without_header_single_sheet_numpy_with_sheet_name(tmp_excel_file)
         Scope.SCENARIO,
         properties={"path": tmp_excel_file, "sheet_name": "Sheet1", "exposed_type": "numpy", "has_header": False},
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr = np.array([[1], [2], [3], [4], [5]])
     excel_dn.write(arr)
@@ -228,6 +236,7 @@ def test_write_without_header_single_sheet_numpy_without_sheet_name(tmp_excel_fi
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": "numpy", "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     arr = np.array([[1], [2], [3], [4], [5]])
     excel_dn.write(arr)
@@ -253,6 +262,7 @@ def test_write_with_header_single_sheet_custom_exposed_type_with_sheet_name(tmp_
         Scope.SCENARIO,
         properties={"path": tmp_excel_file, "sheet_name": "Sheet1", "exposed_type": MyCustomObject},
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
     expected_data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
 
     excel_dn.write(expected_data)
@@ -267,6 +277,7 @@ def test_write_with_header_single_sheet_custom_exposed_type_with_sheet_name(tmp_
 
 def test_write_with_header_single_sheet_custom_exposed_type_without_sheet_name(tmp_excel_file):
     excel_dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": MyCustomObject})
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     excel_dn.write(data)
@@ -291,6 +302,7 @@ def test_write_without_header_single_sheet_custom_exposed_type_with_sheet_name(t
             "has_header": False,
         },
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     excel_dn.write(data)
@@ -304,6 +316,7 @@ def test_write_without_header_single_sheet_custom_exposed_type_without_sheet_nam
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": tmp_excel_file, "exposed_type": MyCustomObject, "has_header": False}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
 
     data = [MyCustomObject(0, 1, "hi"), MyCustomObject(1, 2, "world"), MyCustomObject(2, 3, "text")]
     excel_dn.write(data)
@@ -323,6 +336,7 @@ def test_raise_write_with_sheet_name_length_mismatch(excel_file_with_sheet_name)
         Scope.SCENARIO,
         properties={"path": excel_file_with_sheet_name, "sheet_name": ["sheet_name_1", "sheet_name_2"]},
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
     with pytest.raises(SheetNameLengthMismatch):
         excel_dn.write([])
 
@@ -338,6 +352,7 @@ def test_write_with_column_and_sheet_name(excel_file_with_sheet_name, default_da
     excel_dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": excel_file_with_sheet_name, "sheet_name": sheet_name}
     )
+    _DataManagerFactory._build_manager()._repository._save(excel_dn)
     df = pd.DataFrame(content)
 
     if isinstance(sheet_name, str):
@@ -375,6 +390,7 @@ def test_write_with_column_and_sheet_name(excel_file_with_sheet_name, default_da
 )
 def test_append_pandas_with_sheetname(excel_file, default_data_frame, content):
     dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1"})
+    _DataManagerFactory._build_manager()._repository._save(dn)
     assert_frame_equal(dn.read(), default_data_frame)
 
     dn.append(content)
@@ -395,6 +411,7 @@ def test_append_pandas_with_sheetname(excel_file, default_data_frame, content):
 )
 def test_append_pandas_without_sheetname(excel_file, default_data_frame, content):
     dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file})
+    _DataManagerFactory._build_manager()._repository._save(dn)
     assert_frame_equal(dn.read()["Sheet1"], default_data_frame)
 
     dn.append(content)
@@ -419,6 +436,7 @@ def test_append_only_first_sheet_of_a_multisheet_file(
     dn = ExcelDataNode(
         "foo", Scope.SCENARIO, properties={"path": excel_file_with_multi_sheet, "sheet_name": ["Sheet1", "Sheet2"]}
     )
+    _DataManagerFactory._build_manager()._repository._save(dn)
     assert_frame_equal(dn.read()["Sheet1"], default_multi_sheet_data_frame["Sheet1"])
     assert_frame_equal(dn.read()["Sheet2"], default_multi_sheet_data_frame["Sheet2"])
 

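The single-sheet append tests reduce to a simple contract: write() establishes the sheet's contents and append() adds rows to it. A condensed sketch (hypothetical path; requires an Excel writer such as openpyxl, as the tests themselves do):

    import pandas as pd

    from taipy import Scope
    from taipy.core.data._data_manager_factory import _DataManagerFactory
    from taipy.core.data.excel import ExcelDataNode

    dn = ExcelDataNode(
        "foo", Scope.SCENARIO, properties={"path": "single.xlsx", "sheet_name": "Sheet1"}
    )  # hypothetical path
    _DataManagerFactory._build_manager()._repository._save(dn)
    dn.write(pd.DataFrame([{"a": 1, "b": 2}]))
    dn.append(pd.DataFrame([{"a": 3, "b": 4}]))
    assert len(dn.read()) == 2  # appended rows extend the same sheet
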
+ 5 - 0
tests/core/data/test_write_sql_table_data_node.py

@@ -18,6 +18,7 @@ import pytest
 from pandas.testing import assert_frame_equal
 
 from taipy import Scope
+from taipy.core.data._data_manager_factory import _DataManagerFactory
 from taipy.core.data.sql_table import SQLTableDataNode
 
 
@@ -87,6 +88,7 @@ class TestWriteSQLTableDataNode:
         custom_properties = properties.copy()
         custom_properties.pop("db_extra_args")
         sql_table_dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(sql_table_dn)
 
         with (
             patch("sqlalchemy.engine.Engine.connect") as engine_mock,
@@ -119,6 +121,7 @@ class TestWriteSQLTableDataNode:
         custom_properties["exposed_type"] = "numpy"
         custom_properties.pop("db_extra_args")
         sql_table_dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(sql_table_dn)
 
         with (
             patch("sqlalchemy.engine.Engine.connect") as engine_mock,
@@ -147,6 +150,7 @@ class TestWriteSQLTableDataNode:
         custom_properties["exposed_type"] = MyCustomObject
         custom_properties.pop("db_extra_args")
         sql_table_dn = SQLTableDataNode("foo", Scope.SCENARIO, properties=custom_properties)
+        _DataManagerFactory._build_manager()._repository._save(sql_table_dn)
 
         with (
             patch("sqlalchemy.engine.Engine.connect") as engine_mock,
@@ -180,6 +184,7 @@ class TestWriteSQLTableDataNode:
         }
 
         dn = SQLTableDataNode("sqlite_dn", Scope.SCENARIO, properties=properties)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         original_data = pd.DataFrame([{"foo": 1, "bar": 2}, {"foo": 3, "bar": 4}])
         data = dn.read()
         assert_frame_equal(data, original_data)

+ 18 - 11
tests/core/job/test_job.py

@@ -90,11 +90,11 @@ def _error():
 
 
 def test_job_equals(job):
-    _TaskManagerFactory._build_manager()._set(job.task)
+    _TaskManagerFactory._build_manager()._repository._save(job.task)
     job_manager = _JobManagerFactory()._build_manager()
 
     job_id = job.id
-    job_manager._set(job)
+    job_manager._repository._save(job)
 
     # To test if instance is same type
     task = Task("task", {}, print, [], [], job_id)
@@ -108,7 +108,7 @@ def test_job_equals(job):
 def test_create_job(scenario, task, job):
     from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory
 
-    _ScenarioManagerFactory._build_manager()._set(scenario)
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
 
     assert job.id == "id1"
     assert task in job
@@ -143,6 +143,7 @@ def test_comparison(task):
 def test_status_job(task):
     submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX, task.config_id)
     job = Job("job_id", task, submission.id, "SCENARIO_scenario_config")
+    _JobManagerFactory._build_manager()._repository._save(job)
     submission.jobs = [job]
 
     assert job.is_submitted()
@@ -173,6 +174,7 @@ def test_status_job(task):
 def test_stacktrace_job(task):
     submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX, task.config_id)
     job = Job("job_id", task, submission.id, "SCENARIO_scenario_config")
+    _JobManagerFactory._build_manager()._repository._save(job)
 
     fake_stacktraces = [
         """Traceback (most recent call last):
@@ -190,6 +192,7 @@ def test_notification_job(task):
     subscribe = MagicMock()
     submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX, task.config_id)
     job = Job("job_id", task, submission.id, "SCENARIO_scenario_config")
+    _JobManagerFactory._build_manager()._repository._save(job)
     submission.jobs = [job]
 
     job._on_status_change(subscribe)
@@ -222,8 +225,10 @@ def test_handle_exception_in_user_function(task_id, job_id):
 def test_handle_exception_in_input_data_node(task_id, job_id):
     data_node = InMemoryDataNode("data_node", scope=Scope.SCENARIO)
     task = Task(config_id="name", properties={}, input=[data_node], function=print, output=[], id=task_id)
+    _TaskManagerFactory._build_manager()._create(task)
     submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX, task.config_id)
     job = Job(job_id, task, submission.id, "scenario_entity_id")
+    _JobManagerFactory._build_manager()._repository._save(job)
     submission.jobs = [job]
 
     _dispatch(task, job)
@@ -236,8 +241,10 @@ def test_handle_exception_in_input_data_node(task_id, job_id):
 def test_handle_exception_in_ouptut_data_node(replace_in_memory_write_fct, task_id, job_id):
     data_node = InMemoryDataNode("data_node", scope=Scope.SCENARIO)
     task = Task(config_id="name", properties={}, input=[], function=_foo, output=[data_node], id=task_id)
+    _TaskManagerFactory._build_manager()._create(task)
     submission = _SubmissionManagerFactory._build_manager()._create(task.id, task._ID_PREFIX, task.config_id)
     job = Job(job_id, task, submission.id, "scenario_entity_id")
+    _JobManagerFactory._build_manager()._repository._save(job)
     submission.jobs = [job]
 
     _dispatch(task, job)
@@ -248,16 +255,16 @@ def test_handle_exception_in_ouptut_data_node(replace_in_memory_write_fct, task_
     assert "taipy.core.exceptions.exceptions.DataNodeWritingError" in str(job.stacktrace[0])
 
 
-def test_auto_set_and_reload(current_datetime, job_id):
+def test_auto_update_and_reload(current_datetime, job_id):
     task_1 = Task(config_id="name_1", properties={}, function=_foo, id=TaskId("task_1"))
     task_2 = Task(config_id="name_2", properties={}, function=_foo, id=TaskId("task_2"))
     submission = _SubmissionManagerFactory._build_manager()._create(task_1.id, task_1._ID_PREFIX, task_1.config_id)
     job_1 = Job(job_id, task_1, submission.id, "scenario_entity_id")
     submission.jobs = [job_1]
 
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-    _JobManager._set(job_1)
+    _TaskManager._repository._save(task_1)
+    _TaskManager._repository._save(task_2)
+    _JobManager._repository._save(job_1)
 
     job_2 = _JobManager._get(job_1, "submit_id_2")
 
@@ -334,8 +341,8 @@ def test_status_records(job_id):
         job_1 = Job(job_id, task_1, submission.id, "scenario_entity_id")
     submission.jobs = [job_1]
 
-    _TaskManager._set(task_1)
-    _JobManager._set(job_1)
+    _TaskManager._repository._save(task_1)
+    _JobManager._repository._save(job_1)
 
     assert job_1._status_change_records == {"SUBMITTED": datetime(2024, 9, 25, 13, 30, 30)}
     assert job_1.submitted_at == datetime(2024, 9, 25, 13, 30, 30)
@@ -397,8 +404,8 @@ def test_is_deletable():
 
 def _dispatch(task: Task, job: Job, mode=JobConfig._DEVELOPMENT_MODE):
     Config.configure_job_executions(mode=mode)
-    _TaskManager._set(task)
-    _JobManager._set(job)
+    _TaskManager._repository._save(task)
+    _JobManager._repository._save(job)
     dispatcher: Union[_StandaloneJobDispatcher, _DevelopmentJobDispatcher] = _StandaloneJobDispatcher(
         cast(_AbstractOrchestrator, _OrchestratorFactory._orchestrator)
     )

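The job tests follow the same persistence discipline: the task goes through its manager's _create(), the submission through the submission manager's _create(), and the job through the job manager's repository, all before any status assertion. A condensed sketch of that setup (factory import paths assumed to mirror the ones visible in this diff):

    from taipy.core.job._job_manager_factory import _JobManagerFactory  # path assumed
    from taipy.core.job.job import Job
    from taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory  # path assumed
    from taipy.core.task._task_manager_factory import _TaskManagerFactory  # path assumed
    from taipy.core.task.task import Task

    task = Task("name", {}, print, [], [])
    _TaskManagerFactory._build_manager()._create(task)
    submission = _SubmissionManagerFactory._build_manager()._create(
        task.id, task._ID_PREFIX, task.config_id
    )
    job = Job("job_id", task, submission.id, "SCENARIO_scenario_config")
    _JobManagerFactory._build_manager()._repository._save(job)
    submission.jobs = [job]
    assert job.is_submitted()
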
+ 13 - 13
tests/core/job/test_job_manager.py

@@ -144,11 +144,11 @@ def test_raise_when_trying_to_delete_unfinished_job():
     lock = m.Lock()
     dnm = _DataManagerFactory._build_manager()
     dn_1 = InMemoryDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dnm._set(dn_1)
+    dnm._repository._save(dn_1)
     dn_2 = InMemoryDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    dnm._set(dn_2)
+    dnm._repository._save(dn_2)
     dn_3 = InMemoryDataNode("dn_config_3", Scope.SCENARIO)
-    dnm._set(dn_3)
+    dnm._repository._save(dn_3)
     task = Task(
         "task_config_1", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id="raise_when_delete_unfinished"
     )
@@ -171,11 +171,11 @@ def test_force_deleting_unfinished_job():
     lock = m.Lock()
     dnm = _DataManagerFactory._build_manager()
     dn_1 = InMemoryDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dnm._set(dn_1)
+    dnm._repository._save(dn_1)
     dn_2 = InMemoryDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    dnm._set(dn_2)
+    dnm._repository._save(dn_2)
     dn_3 = InMemoryDataNode("dn_config_3", Scope.SCENARIO)
-    dnm._set(dn_3)
+    dnm._repository._save(dn_3)
     task = Task(
         "task_config_1", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id="force_deleting_unfinished_job"
     )
@@ -289,11 +289,11 @@ def test_cancel_single_running_job():
     lock = m.Lock()
     dnm = _DataManagerFactory._build_manager()
     dn_1 = InMemoryDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dnm._set(dn_1)
+    dnm._repository._save(dn_1)
     dn_2 = InMemoryDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    dnm._set(dn_2)
+    dnm._repository._save(dn_2)
     dn_3 = InMemoryDataNode("dn_config_3", Scope.SCENARIO)
-    dnm._set(dn_3)
+    dnm._repository._save(dn_3)
     task = Task("task_config_1", {}, partial(lock_multiply, lock), [dn_1, dn_2], [dn_3], id="cancel_single_job")
 
     dispatcher = cast(_StandaloneJobDispatcher, _OrchestratorFactory._build_dispatcher(force_restart=True))
@@ -332,10 +332,10 @@ def test_cancel_subsequent_jobs():
     submission_1 = submission_manager._create("scenario_id", Scenario._ID_PREFIX, "scenario_config_id")
     submission_2 = submission_manager._create("scenario_id", Scenario._ID_PREFIX, "scenario_config_id")
 
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _DataManager._set(dn_3)
-    _DataManager._set(dn_4)
+    _DataManager._repository._save(dn_1)
+    _DataManager._repository._save(dn_2)
+    _DataManager._repository._save(dn_3)
+    _DataManager._repository._save(dn_4)
 
     with lock_0:
         job_1 = orchestrator._lock_dn_output_and_create_job(

+ 69 - 61
tests/core/scenario/test_scenario.py

@@ -8,6 +8,7 @@
 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 # an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations under the License.
+
 from datetime import datetime, timedelta
 from unittest import mock
 
@@ -42,7 +43,7 @@ def test_scenario_equals(scenario):
     scenario_manager = _ScenarioManagerFactory()._build_manager()
 
     scenario_id = scenario.id
-    scenario_manager._set(scenario)
+    scenario_manager._repository._save(scenario)
 
     # To test if instance is same type
     task = Task("task", {}, print, [], [], scenario_id)
@@ -135,15 +136,16 @@ def test_create_scenario_and_add_sequences():
 
     data_manager = _DataManagerFactory._build_manager()
     task_manager = _TaskManagerFactory._build_manager()
-    data_manager._set(input_1)
-    data_manager._set(output_1)
-    data_manager._set(output_2)
-    data_manager._set(additional_dn_1)
-    data_manager._set(additional_dn_2)
-    task_manager._set(task_1)
-    task_manager._set(task_2)
+    data_manager._repository._save(input_1)
+    data_manager._repository._save(output_1)
+    data_manager._repository._save(output_2)
+    data_manager._repository._save(additional_dn_1)
+    data_manager._repository._save(additional_dn_2)
+    task_manager._repository._save(task_1)
+    task_manager._repository._save(task_2)
 
     scenario = Scenario("scenario", {task_1}, {})
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
     scenario.sequences = {"sequence_1": {"tasks": [task_1]}, "sequence_2": {"tasks": []}}
     assert scenario.id is not None
     assert scenario.config_id == "scenario"
@@ -184,15 +186,16 @@ def test_create_scenario_overlapping_sequences():
     task_2 = Task("task_2", {}, print, [output_1], [output_2], TaskId("task_id_2"))
     data_manager = _DataManagerFactory._build_manager()
     task_manager = _TaskManagerFactory._build_manager()
-    data_manager._set(input_1)
-    data_manager._set(output_1)
-    data_manager._set(output_2)
-    data_manager._set(additional_dn_1)
-    data_manager._set(additional_dn_2)
-    task_manager._set(task_1)
-    task_manager._set(task_2)
+    data_manager._repository._save(input_1)
+    data_manager._repository._save(output_1)
+    data_manager._repository._save(output_2)
+    data_manager._repository._save(additional_dn_1)
+    data_manager._repository._save(additional_dn_2)
+    task_manager._repository._save(task_1)
+    task_manager._repository._save(task_2)
 
     scenario = Scenario("scenario", {task_1, task_2}, {})
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
     scenario.add_sequence("sequence_1", [task_1])
     scenario.add_sequence("sequence_2", [task_1, task_2])
     assert scenario.id is not None
@@ -227,14 +230,14 @@ def test_create_scenario_one_additional_dn():
     task_2 = Task("task_2", {}, print, [input_2], [output_2], TaskId("task_id_2"))
     data_manager = _DataManagerFactory._build_manager()
     task_manager = _TaskManagerFactory._build_manager()
-    data_manager._set(input_1)
-    data_manager._set(output_1)
-    data_manager._set(input_2)
-    data_manager._set(output_2)
-    data_manager._set(additional_dn_1)
-    data_manager._set(additional_dn_2)
-    task_manager._set(task_1)
-    task_manager._set(task_2)
+    data_manager._repository._save(input_1)
+    data_manager._repository._save(output_1)
+    data_manager._repository._save(input_2)
+    data_manager._repository._save(output_2)
+    data_manager._repository._save(additional_dn_1)
+    data_manager._repository._save(additional_dn_2)
+    task_manager._repository._save(task_1)
+    task_manager._repository._save(task_2)
 
     scenario = Scenario("scenario", set(), {}, {additional_dn_1})
     assert scenario.id is not None
@@ -258,14 +261,14 @@ def test_create_scenario_wth_additional_dns():
     task_2 = Task("task_2", {}, print, [input_2], [output_2], TaskId("task_id_2"))
     data_manager = _DataManagerFactory._build_manager()
     task_manager = _TaskManagerFactory._build_manager()
-    data_manager._set(input_1)
-    data_manager._set(output_1)
-    data_manager._set(input_2)
-    data_manager._set(output_2)
-    data_manager._set(additional_dn_1)
-    data_manager._set(additional_dn_2)
-    task_manager._set(task_1)
-    task_manager._set(task_2)
+    data_manager._repository._save(input_1)
+    data_manager._repository._save(output_1)
+    data_manager._repository._save(input_2)
+    data_manager._repository._save(output_2)
+    data_manager._repository._save(additional_dn_1)
+    data_manager._repository._save(additional_dn_2)
+    task_manager._repository._save(task_1)
+    task_manager._repository._save(task_2)
 
     scenario = Scenario("scenario", set(), {}, {additional_dn_1, additional_dn_2})
     assert scenario.id is not None
@@ -353,9 +356,9 @@ def test_adding_sequence_raises_tasks_not_in_scenario(data_node):
     scenario = Scenario("scenario", [task_1], {})
     scenario_manager = _ScenarioManagerFactory._build_manager()
     task_manager = _TaskManagerFactory._build_manager()
-    scenario_manager._set(scenario)
-    task_manager._set(task_1)
-    task_manager._set(task_2)
+    scenario_manager._repository._save(scenario)
+    task_manager._create(task_1)
+    task_manager._create(task_2)
 
     scenario.add_sequences({"sequence_1": {}})
 
@@ -379,11 +382,11 @@ def test_adding_sequence_raises_tasks_not_in_scenario(data_node):
 
 def test_adding_existing_sequence_raises_exception(data_node):
     task_1 = Task("task_1", {}, print, output=[data_node])
-    _TaskManagerFactory._build_manager()._set(task_1)
+    _TaskManagerFactory._build_manager()._create(task_1)
     task_2 = Task("task_2", {}, print, input=[data_node])
-    _TaskManagerFactory._build_manager()._set(task_2)
+    _TaskManagerFactory._build_manager()._create(task_2)
     scenario = Scenario("scenario", tasks={task_1, task_2}, properties={})
-    _ScenarioManagerFactory._build_manager()._set(scenario)
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
 
     scenario.add_sequence("sequence_1", [task_1])
     with pytest.raises(SequenceAlreadyExists):
@@ -392,11 +395,11 @@ def test_adding_existing_sequence_raises_exception(data_node):
 
 def test_renaming_existing_sequence_raises_exception(data_node):
     task_1 = Task("task_1", {}, print, output=[data_node])
-    _TaskManagerFactory._build_manager()._set(task_1)
+    _TaskManagerFactory._build_manager()._create(task_1)
     task_2 = Task("task_2", {}, print, input=[data_node])
-    _TaskManagerFactory._build_manager()._set(task_2)
+    _TaskManagerFactory._build_manager()._create(task_2)
     scenario = Scenario("scenario", {task_1, task_2}, {})
-    _ScenarioManagerFactory._build_manager()._set(scenario)
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
 
     scenario.add_sequence("sequence_1", [task_1])
     scenario.add_sequence("sequence_2", [task_2])
@@ -426,10 +429,10 @@ def test_add_rename_and_remove_sequences():
     data_manager = _DataManagerFactory._build_manager()
     scenario_manager = _ScenarioManagerFactory._build_manager()
     for dn in [data_node_1, data_node_2, data_node_3, data_node_4, data_node_5]:
-        data_manager._set(dn)
+        data_manager._repository._save(dn)
     for t in [task_1, task_2, task_3, task_4, task_5]:
-        task_manager._set(t)
-    scenario_manager._set(scenario)
+        task_manager._repository._save(t)
+    scenario_manager._repository._save(scenario)
 
     assert scenario.get_inputs() == {data_node_1, data_node_2, data_node_5}
     assert scenario._get_set_of_tasks() == {task_1, task_2, task_3, task_4, task_5}
@@ -467,6 +470,9 @@ def test_update_sequence(data_node):
     task_1 = Task("foo", {}, print, [data_node], [], TaskId("t1"))
     task_2 = Task("bar", {}, print, [], [data_node], id=TaskId("t2"))
     scenario = Scenario("baz", {task_1, task_2}, {})
+    _TaskManagerFactory._build_manager()._create(task_1)
+    _TaskManagerFactory._build_manager()._create(task_2)
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
     scenario.add_sequence("seq_1", [task_1])
 
     assert len(scenario.sequences) == 1
@@ -482,10 +488,10 @@ def test_update_sequence(data_node):
 def test_add_rename_and_remove_sequences_within_context(data_node):
     task_1 = Task("task_1", {}, print, output=[data_node])
     task_2 = Task("task_2", {}, print, input=[data_node])
-    _TaskManagerFactory._build_manager()._set(task_1)
-    _TaskManagerFactory._build_manager()._set(task_2)
+    _TaskManagerFactory._build_manager()._create(task_1)
+    _TaskManagerFactory._build_manager()._create(task_2)
     scenario = Scenario(config_id="scenario", tasks={task_1, task_2}, properties={})
-    _ScenarioManagerFactory._build_manager()._set(scenario)
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
 
     with scenario as sc:
         sc.add_sequence("seq_name", [task_1])
@@ -508,6 +514,7 @@ def test_add_rename_and_remove_sequences_within_context(data_node):
 
 def test_add_property_to_scenario():
     scenario = Scenario("foo", set(), {"key": "value"})
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
     assert scenario.properties == {"key": "value"}
     assert scenario.properties["key"] == "value"
 
@@ -520,8 +527,9 @@ def test_add_property_to_scenario():
 
 def test_add_cycle_to_scenario(cycle):
     scenario = Scenario("foo", set(), {})
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
     assert scenario.cycle is None
-    _CycleManagerFactory._build_manager()._set(cycle)
+    _CycleManagerFactory._build_manager()._repository._save(cycle)
     scenario.cycle = cycle
 
     assert scenario.cycle == cycle
@@ -529,6 +537,7 @@ def test_add_cycle_to_scenario(cycle):
 
 def test_add_and_remove_subscriber():
     scenario = Scenario("foo", set(), {})
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
 
     scenario._add_subscriber(print)
     assert len(scenario.subscribers) == 1
@@ -548,7 +557,7 @@ def test_add_and_remove_tag():
     assert len(scenario.tags) == 0
 
 
-def test_auto_set_and_reload(cycle, current_datetime, task, data_node):
+def test_auto_update_and_reload(cycle, current_datetime, task, data_node):
     scenario_1 = Scenario(
         "foo",
         set(),
@@ -580,15 +589,14 @@ def test_auto_set_and_reload(cycle, current_datetime, task, data_node):
         SequenceId(f"SEQUENCE_{tmp_sequence_name}_{scenario_1.id}"),
     )
 
-    _TaskManagerFactory._build_manager()._set(task)
-    _DataManagerFactory._build_manager()._set(data_node)
-    _DataManagerFactory._build_manager()._set(additional_dn)
-    _CycleManagerFactory._build_manager()._set(cycle)
+    _TaskManagerFactory._build_manager()._create(task)
+    _DataManagerFactory._build_manager()._repository._save(data_node)
+    _DataManagerFactory._build_manager()._repository._save(additional_dn)
     scenario_manager = _ScenarioManagerFactory._build_manager()
     cycle_manager = _CycleManagerFactory._build_manager()
-    cycle_manager._set(cycle)
-    cycle_manager._set(tmp_cycle)
-    scenario_manager._set(scenario_1)
+    cycle_manager._repository._save(cycle)
+    cycle_manager._repository._save(tmp_cycle)
+    scenario_manager._repository._save(scenario_1)
 
     scenario_2 = scenario_manager._get(scenario_1)
     assert scenario_1.config_id == "foo"
@@ -767,7 +775,7 @@ def test_auto_set_and_reload(cycle, current_datetime, task, data_node):
     assert not scenario_1._is_in_context
 
 
-def test_auto_set_and_reload_properties():
+def test_auto_update_and_reload_properties():
     scenario_1 = Scenario(
         "foo",
         set(),
@@ -775,7 +783,7 @@ def test_auto_set_and_reload_properties():
     )
 
     scenario_manager = _ScenarioManagerFactory._build_manager()
-    scenario_manager._set(scenario_1)
+    scenario_manager._repository._save(scenario_1)
 
     scenario_2 = scenario_manager._get(scenario_1)
 
@@ -1044,11 +1052,11 @@ def test_is_ready_to_run():
     data_manager = _DataManagerFactory._build_manager()
     data_manager._delete_all()
     for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7, data_node_8, data_node_9]:
-        data_manager._set(dn)
+        data_manager._repository._save(dn)
     task_manager = _TaskManagerFactory._build_manager()
     for task in [task_1, task_2, task_3, task_4, task_5, task_6]:
-        task_manager._set(task)
-    _ScenarioManagerFactory._build_manager()._set(scenario)
+        task_manager._repository._save(task)
+    _ScenarioManagerFactory._build_manager()._repository._save(scenario)
 
     assert scenario.is_ready_to_run()
 
@@ -1095,7 +1103,7 @@ def test_data_nodes_being_edited():
 
     data_manager = _DataManagerFactory._build_manager()
     for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7, data_node_8, data_node_9]:
-        data_manager._set(dn)
+        data_manager._repository._save(dn)
 
     assert len(scenario.data_nodes_being_edited()) == 0
     assert scenario.data_nodes_being_edited() == set()
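
The hunks above all apply the same substitution. Where a test used the removed `_set()` to persist an entity, it now picks one of three entry points: `_create()` when the manager owns creation side effects (tasks), `_repository._save()` for plain first-time persistence, and `_update()` for pushing changes to an entity that is already stored. A minimal sketch of the resulting pattern, mirroring the call sites in these tests and assuming the factory modules keep their usual paths:

```python
# Minimal sketch of the post-refactor persistence pattern; mirrors the
# call sites in the tests above rather than any documented public API.
from taipy.core.scenario._scenario_manager_factory import _ScenarioManagerFactory
from taipy.core.scenario.scenario import Scenario
from taipy.core.task._task_manager_factory import _TaskManagerFactory
from taipy.core.task.task import Task

task = Task("my_task", {}, print)
scenario = Scenario("my_scenario", {task}, {})

task_manager = _TaskManagerFactory._build_manager()
scenario_manager = _ScenarioManagerFactory._build_manager()

task_manager._create(task)                    # creation goes through the manager
scenario_manager._repository._save(scenario)  # first-time persistence: straight to the repository

scenario.properties["key"] = "value"          # modify the stored entity...
scenario_manager._update(scenario)            # ...and push the change explicitly
```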

+ 28 - 33
tests/core/scenario/test_scenario_manager.py

@@ -55,7 +55,7 @@ from taipy.core.task.task_id import TaskId
 from tests.core.utils.NotifyMock import NotifyMock
 
 
-def test_set_and_get_scenario(cycle):
+def test_save_and_get_scenario(cycle):
     scenario_id_1 = ScenarioId("scenario_id_1")
     scenario_1 = Scenario("scenario_name_1", [], {}, [], scenario_id_1)
 
@@ -100,7 +100,7 @@ def test_set_and_get_scenario(cycle):
     assert _ScenarioManager._get(scenario_2) is None
 
     # Save one scenario. We expect to have only one scenario stored
-    _ScenarioManager._set(scenario_1)
+    _ScenarioManager._repository._save(scenario_1)
     assert len(_ScenarioManager._get_all()) == 1
     assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id
     assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id
@@ -118,10 +118,10 @@ def test_set_and_get_scenario(cycle):
     assert _ScenarioManager._get(scenario_2) is None
 
     # Save a second scenario. Now, we expect to have a total of two scenarios stored
-    _TaskManager._set(task_2)
-    _CycleManager._set(cycle)
-    _ScenarioManager._set(scenario_2)
-    _DataManager._set(additional_dn_2)
+    _TaskManager._create(task_2)
+    _CycleManager._repository._save(cycle)
+    _ScenarioManager._repository._save(scenario_2)
+    _DataManager._repository._save(additional_dn_2)
     assert len(_ScenarioManager._get_all()) == 2
     assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id
     assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id
@@ -153,7 +153,7 @@ def test_set_and_get_scenario(cycle):
     assert _CycleManager._get(cycle.id).id == cycle.id
 
     # We save the first scenario again. We expect nothing to change
-    _ScenarioManager._set(scenario_1)
+    _ScenarioManager._update(scenario_1)
     assert len(_ScenarioManager._get_all()) == 2
     assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id
     assert _ScenarioManager._get(scenario_id_1).config_id == scenario_1.config_id
@@ -184,10 +184,10 @@ def test_set_and_get_scenario(cycle):
 
     # We save a third scenario with the same id as the first one.
     # We expect the first scenario to be updated
-    _DataManager._set(additional_dn_3)
-    _TaskManager._set(task_3)
-    _TaskManager._set(scenario_2.tasks[task_name_2])
-    _ScenarioManager._set(scenario_3_with_same_id)
+    _DataManager._repository._save(additional_dn_3)
+    _TaskManager._repository._save(task_3)
+    _TaskManager._repository._save(scenario_2.tasks[task_name_2])
+    _ScenarioManager._repository._save(scenario_3_with_same_id)
     assert len(_ScenarioManager._get_all()) == 2
     assert _ScenarioManager._get(scenario_id_1).id == scenario_1.id
     assert _ScenarioManager._get(scenario_id_1).config_id == scenario_3_with_same_id.config_id
@@ -253,7 +253,7 @@ def test_get_all_on_multiple_versions_environment():
     # Only version 2.0 has the scenario with config_id = "config_id_6"
     for version in range(1, 3):
         for i in range(5):
-            _ScenarioManager._set(
+            _ScenarioManager._repository._save(
                 Scenario(f"config_id_{i+version}", [], {}, [], ScenarioId(f"id{i}_v{version}"), version=f"{version}.0")
             )
 
@@ -292,7 +292,7 @@ def test_create_scenario_does_not_modify_config():
     assert scenario.name == name_1
 
     scenario.properties["foo"] = "bar"
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._update(scenario)
     assert len(scenario_config.properties) == 0
     assert len(scenario.properties) == 2
     assert scenario.properties.get("foo") == "bar"
@@ -831,10 +831,10 @@ def test_get_set_primary_scenario():
     assert len(_ScenarioManager._get_all()) == 0
     assert len(_CycleManager._get_all()) == 0
 
-    _CycleManager._set(cycle_1)
+    _CycleManager._repository._save(cycle_1)
 
-    _ScenarioManager._set(scenario_1)
-    _ScenarioManager._set(scenario_2)
+    _ScenarioManager._repository._save(scenario_1)
+    _ScenarioManager._repository._save(scenario_2)
 
     assert len(_ScenarioManager._get_primary_scenarios()) == 0
     assert len(_ScenarioManager._get_all_by_cycle(cycle_1)) == 2
@@ -1120,7 +1120,7 @@ def test_submit():
 
         # scenario and sequence do exist, but tasks do not exist.
         # We expect an exception to be raised
-        _ScenarioManager._set(scenario)
+        _ScenarioManager._repository._save(scenario)
         with pytest.raises(NonExistingTask):
             _ScenarioManager._submit(scenario.id)
         with pytest.raises(NonExistingTask):
@@ -1129,11 +1129,11 @@ def test_submit():
         # scenario, sequence, and tasks do exist.
         # We expect all the tasks to be submitted once,
         # respecting specific ordering constraints
-        _TaskManager._set(task_1)
-        _TaskManager._set(task_2)
-        _TaskManager._set(task_3)
-        _TaskManager._set(task_4)
-        _TaskManager._set(task_5)
+        _TaskManager._create(task_1)
+        _TaskManager._create(task_2)
+        _TaskManager._create(task_3)
+        _TaskManager._create(task_4)
+        _TaskManager._create(task_5)
         _ScenarioManager._submit(scenario.id)
         submit_calls = _TaskManager._orchestrator().submit_calls
         assert len(submit_calls) == 5
@@ -1313,13 +1313,10 @@ def test_tags():
     assert scenario_2_tags.has_tag("fst")
     assert scenario_2_tags.has_tag("scd")
 
-    # test get and set serialize/deserialize tags
-    _CycleManager._set(cycle_1)
-    _CycleManager._set(cycle_2)
-    _CycleManager._set(cycle_3)
-    _ScenarioManager._set(scenario_no_tag)
-    _ScenarioManager._set(scenario_1_tag)
-    _ScenarioManager._set(scenario_2_tags)
+    # test get and update serialize/deserialize tags
+    _ScenarioManager._repository._save(scenario_no_tag)
+    _ScenarioManager._repository._save(scenario_1_tag)
+    _ScenarioManager._repository._save(scenario_2_tags)
 
     assert len(_ScenarioManager._get(ScenarioId("scenario_no_tag")).tags) == 0
     assert not _ScenarioManager._get(ScenarioId("scenario_no_tag")).has_tag("fst")
@@ -1355,9 +1352,7 @@ def test_tags():
     assert not scenario_2_tags.has_tag("thd")
 
     _ScenarioManager._untag(scenario_no_tag, "thd")
-    _ScenarioManager._set(scenario_no_tag)
     _ScenarioManager._tag(scenario_1_tag, "fst")
-    _ScenarioManager._set(scenario_1_tag)
 
     # test getters
     assert _ScenarioManager._get_all_by_cycle_tag(cycle_3, "fst") == []
@@ -1409,7 +1404,7 @@ def test_authorized_tags():
     scenario_2_cfg = Config.configure_scenario("scenario_2", [], [], Frequency.DAILY, authorized_tags=["foo", "bar"])
 
     scenario_2 = _ScenarioManager._create(scenario_2_cfg)
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     assert len(scenario.tags) == 0
     assert len(scenario_2.tags) == 0
@@ -1579,7 +1574,7 @@ def test_can_duplicate_scenario():
 
 def test_duplicate_scenario():
     scenario = Scenario("config_id", set(), {}, set(), ScenarioId("scenario_id"))
-    with mock.patch.object(_ScenarioManager, "_can_duplicate", return_value= ReasonCollection()) as mock_can:
+    with mock.patch.object(_ScenarioManager, "_can_duplicate", return_value=ReasonCollection()) as mock_can:
         with mock.patch.object(_ScenarioDuplicator, "duplicate") as mock_duplicate:
             _ScenarioManager._duplicate(scenario)
             mock_can.assert_called_once_with(scenario)
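
The two mocks in test_duplicate_scenario() pin down the control flow of `_duplicate()`: the `_can_duplicate()` guard is consulted first, and only then is the scenario handed to `_ScenarioDuplicator`. A hypothetical reading of that flow, inferred from the mocks alone; the duplicator's constructor signature and the exception raised here are assumptions, not the shipped implementation:

```python
# Inferred from the mocks in test_duplicate_scenario(); not the real body.
from taipy.core.scenario._scenario_duplicator import _ScenarioDuplicator
from taipy.core.scenario._scenario_manager import _ScenarioManager

def duplicate_with_guard(scenario):
    reasons = _ScenarioManager._can_duplicate(scenario)  # returns a ReasonCollection
    if not reasons:  # the collection is falsy while it carries blocking reasons
        raise RuntimeError(str(reasons))  # placeholder for the real taipy exception
    return _ScenarioDuplicator(scenario).duplicate()
```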

+ 11 - 11
tests/core/sequence/test_sequence.py

@@ -482,10 +482,10 @@ def test_is_ready_to_run():
 
     data_manager = _DataManagerFactory._build_manager()
     for dn in [data_node_1, data_node_2, data_node_3, data_node_4, data_node_5, data_node_6]:
-        data_manager._set(dn)
+        data_manager._repository._save(dn)
     for task in [task_1, task_2, task_3, task_4]:
-        _TaskManager._set(task)
-    _ScenarioManager._set(scenario)
+        _TaskManager._repository._save(task)
+    _ScenarioManager._repository._save(scenario)
     scenario.add_sequence("sequence", [task_4, task_2, task_1, task_3])
     sequence = scenario.sequences["sequence"]
     # s1 ---      s5 ---> t2 ---> s4
@@ -529,7 +529,7 @@ def test_data_nodes_being_edited():
 
     data_manager = _DataManagerFactory._build_manager()
     for dn in [data_node_1, data_node_2, data_node_4, data_node_5, data_node_6, data_node_7]:
-        data_manager._set(dn)
+        data_manager._repository._save(dn)
 
     assert len(sequence.data_nodes_being_edited()) == 0
     assert sequence.data_nodes_being_edited() == set()
@@ -581,18 +581,18 @@ def test_get_set_of_tasks():
     assert sequence_1._get_set_of_tasks() == {task_1, task_2, task_3}
 
 
-def test_auto_set_and_reload(task):
+def test_auto_update_and_reload(task):
     tmp_task = Task("tmp_task_config_id", {}, print, list(task.output.values()), [], TaskId("tmp_task_id"))
     scenario = Scenario("scenario", [task, tmp_task], {}, sequences={"foo": {}})
 
-    _TaskManager._set(task)
-    _TaskManager._set(tmp_task)
-    _ScenarioManager._set(scenario)
+    _TaskManager._create(task)
+    _TaskManager._create(tmp_task)
+    _ScenarioManager._repository._save(scenario)
 
     sequence_1 = scenario.sequences["foo"]
     sequence_2 = _SequenceManager._get(sequence_1)
 
-    # auto set & reload on tasks attribute
+    # auto update & reload on tasks attribute
     assert len(sequence_1.tasks) == 0
     assert len(sequence_2.tasks) == 0
     sequence_1.tasks = [tmp_task]
@@ -661,10 +661,10 @@ def test_auto_set_and_reload(task):
     assert not sequence_1._is_in_context
 
 
-def test_auto_set_and_reload_properties():
+def test_auto_update_and_reload_properties():
     scenario = Scenario("scenario", [], {}, sequences={"foo": {}})
 
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence_1 = scenario.sequences["foo"]
     sequence_2 = _SequenceManager._get(sequence_1)
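
The renamed test keeps its original contract: two handles on the same stored sequence stay in sync, because assigning through a public attribute persists the change and the other handle reloads it on access. Condensed from the hunk above:

```python
# Condensed from test_auto_update_and_reload(); `scenario`, `task` and
# `tmp_task` are the fixtures built at the top of the test.
sequence_1 = scenario.sequences["foo"]
sequence_2 = _SequenceManager._get(sequence_1)

sequence_1.tasks = [tmp_task]      # public attribute assignment persists the change
assert len(sequence_2.tasks) == 1  # the second handle reloads and sees it
```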

+ 43 - 39
tests/core/sequence/test_sequence_manager.py

@@ -65,20 +65,20 @@ def test_breakdown_sequence_id():
 def test_raise_sequence_does_not_belong_to_scenario():
     with pytest.raises(SequenceBelongsToNonExistingScenario):
         sequence = Sequence({"name": "sequence_name"}, [], "SEQUENCE_sequence_name_SCENARIO_scenario_id")
-        _SequenceManager._set(sequence)
+        _SequenceManager._update(sequence)
 
 
 def __init():
     input_dn = InMemoryDataNode("foo", Scope.SCENARIO)
     output_dn = InMemoryDataNode("foo", Scope.SCENARIO)
     task = Task("task", {}, print, [input_dn], [output_dn], TaskId("Task_task_id"))
-    _TaskManager._set(task)
+    _TaskManager._create(task)
     scenario = Scenario("scenario", {task}, {}, set())
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
     return scenario, task
 
 
-def test_set_and_get_sequence_no_existing_sequence():
+def test_save_and_get_sequence_no_existing_sequence():
     scenario, _ = __init()
     sequence_name_1 = "p1"
     sequence_id_1 = SequenceId(f"SEQUENCE_{sequence_name_1}_{scenario.id}")
@@ -90,7 +90,7 @@ def test_set_and_get_sequence_no_existing_sequence():
     assert _SequenceManager._get("sequence") is None
 
 
-def test_set_and_get():
+def test_save_and_get():
     scenario, task = __init()
     sequence_name_1 = "p1"
     sequence_id_1 = SequenceId(f"SEQUENCE_{sequence_name_1}_{scenario.id}")
@@ -106,7 +106,6 @@ def test_set_and_get():
     assert _SequenceManager._get(sequence_id_2) is None
 
     # Save a second sequence. Now, we expect to have a total of two sequences stored
-    _TaskManager._set(task)
     scenario.add_sequences({sequence_name_2: [task]})
     sequence_2 = scenario.sequences[sequence_name_2]
     assert _SequenceManager._get(sequence_id_1).id == sequence_1.id
@@ -138,11 +137,11 @@ def test_task_parent_id_set_only_when_create():
     scenario, task = __init()
     sequence_name_1 = "p1"
 
-    with mock.patch("taipy.core.task._task_manager._TaskManager._set") as mck:
+    with mock.patch("taipy.core.task._task_manager._TaskManager._update") as mck:
         scenario.add_sequences({sequence_name_1: [task]})
         mck.assert_called_once()
 
-    with mock.patch("taipy.core.task._task_manager._TaskManager._set") as mck:
+    with mock.patch("taipy.core.task._task_manager._TaskManager._update") as mck:
         scenario.sequences[sequence_name_1]
         mck.assert_not_called()
 
@@ -151,7 +150,7 @@ def test_get_all_on_multiple_versions_environment():
     # Create 5 sequences from Scenario with 2 versions each
     for version in range(1, 3):
         for i in range(5):
-            _ScenarioManager._set(
+            _ScenarioManager._repository._save(
                 Scenario(
                     f"config_id_{i+version}",
                     [],
@@ -203,8 +202,9 @@ def test_get_all_on_multiple_versions_environment():
 def test_is_submittable():
     dn = InMemoryDataNode("dn", Scope.SCENARIO, properties={"default_data": 10})
     task = Task("task", {}, print, [dn])
+    _TaskManager._create(task)
     scenario = Scenario("scenario", {task}, {}, set())
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     rc = _SequenceManager._is_submittable("some_sequence")
     assert not rc
@@ -247,6 +247,10 @@ def test_submit():
     task_2 = Task("garply", {}, print, [data_node_3], [data_node_5], TaskId("t2"))
     task_3 = Task("waldo", {}, print, [data_node_5, data_node_4], [data_node_6], TaskId("t3"))
     task_4 = Task("fred", {}, print, [data_node_4], [data_node_7], TaskId("t4"))
+    _TaskManager._create(task_1)
+    _TaskManager._create(task_2)
+    _TaskManager._create(task_3)
+    _TaskManager._create(task_4)
     scenario = Scenario("sce", {task_1, task_2, task_3, task_4}, {})
 
     sequence_name = "sequence"
@@ -272,15 +276,15 @@ def test_submit():
         with pytest.raises(NonExistingSequence):
             _SequenceManager._submit(sequence_id)
 
-        _ScenarioManager._set(scenario)
+        _ScenarioManager._repository._save(scenario)
         scenario.add_sequences({sequence_name: [task_4, task_2, task_1, task_3]})
 
         # sequence and tasks do exist. We expect the tasks to be submitted
         # in a specific order
-        _TaskManager._set(task_1)
-        _TaskManager._set(task_2)
-        _TaskManager._set(task_3)
-        _TaskManager._set(task_4)
+        _TaskManager._repository._save(task_1)
+        _TaskManager._repository._save(task_2)
+        _TaskManager._repository._save(task_3)
+        _TaskManager._repository._save(task_4)
         sequence = scenario.sequences[sequence_name]
 
         _SequenceManager._submit(sequence.id)
@@ -341,8 +345,8 @@ def test_submit_sequence_from_tasks_with_one_or_no_input_output():
     task_no_input_no_output = Task("task_no_input_no_output", {}, mock_function_no_input_no_output)
     scenario_1 = Scenario("scenario_1", {task_no_input_no_output}, {})
 
-    _TaskManager._set(task_no_input_no_output)
-    _ScenarioManager._set(scenario_1)
+    _TaskManager._repository._save(task_no_input_no_output)
+    _ScenarioManager._repository._save(scenario_1)
 
     scenario_1.add_sequences({"my_sequence_1": [task_no_input_no_output]})
     sequence_1 = scenario_1.sequences["my_sequence_1"]
@@ -359,11 +363,11 @@ def test_submit_sequence_from_tasks_with_one_or_no_input_output():
     )
     scenario_2 = Scenario("scenario_2", {task_one_input_no_output}, {})
 
-    _DataManager._set(data_node_input)
+    _DataManager._repository._save(data_node_input)
     data_node_input.unlock_edit()
 
-    _TaskManager._set(task_one_input_no_output)
-    _ScenarioManager._set(scenario_2)
+    _TaskManager._repository._save(task_one_input_no_output)
+    _ScenarioManager._repository._save(scenario_2)
 
     scenario_2.add_sequences({"my_sequence_2": [task_one_input_no_output]})
     sequence_2 = scenario_2.sequences["my_sequence_2"]
@@ -379,10 +383,10 @@ def test_submit_sequence_from_tasks_with_one_or_no_input_output():
     )
     scenario_3 = Scenario("scenario_3", {task_no_input_one_output}, {})
 
-    _DataManager._set(data_node_output)
+    _DataManager._repository._save(data_node_output)
     assert data_node_output.read() is None
-    _TaskManager._set(task_no_input_one_output)
-    _ScenarioManager._set(scenario_3)
+    _TaskManager._repository._save(task_no_input_one_output)
+    _ScenarioManager._repository._save(scenario_3)
 
     scenario_3.add_sequences({"my_sequence_3": [task_no_input_one_output]})
     sequence_3 = scenario_3.sequences["my_sequence_3"]
@@ -472,7 +476,7 @@ def test_sequence_notification_subscribe(mocker):
 
     tasks = _TaskManager._bulk_get_or_create(task_configs=task_configs)
     scenario = Scenario("scenario", set(tasks), {}, sequences={"by_1": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["by_1"]
 
@@ -526,7 +530,7 @@ def test_sequence_notification_subscribe_multi_param(mocker):
 
     tasks = _TaskManager._bulk_get_or_create(task_configs)
     scenario = Scenario("scenario", set(tasks), {}, sequences={"by_6": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["by_6"]
     notify = mocker.Mock()
@@ -557,7 +561,7 @@ def test_sequence_notification_unsubscribe(mocker):
 
     tasks = _TaskManager._bulk_get_or_create(task_configs)
     scenario = Scenario("scenario", set(tasks), {}, sequences={"by_6": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["by_6"]
 
@@ -586,7 +590,7 @@ def test_sequence_notification_unsubscribe_multi_param():
 
     tasks = _TaskManager._bulk_get_or_create(task_configs)
     scenario = Scenario("scenario", tasks, {}, sequences={"by_6": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["by_6"]
 
@@ -620,7 +624,7 @@ def test_sequence_notification_subscribe_all():
 
     tasks = _TaskManager._bulk_get_or_create(task_configs)
     scenario = Scenario("scenario", tasks, {}, sequences={"by_6": {"tasks": tasks}, "other_sequence": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["by_6"]
     other_sequence = scenario.sequences["other_sequence"]
@@ -640,8 +644,8 @@ def test_delete():
 
     scenario_1 = Scenario("scenario_1", set(), {}, scenario_id="SCENARIO_scenario_id_1")
     scenario_2 = Scenario("scenario_2", set(), {}, scenario_id="SCENARIO_scenario_id_2")
-    _ScenarioManager._set(scenario_1)
-    _ScenarioManager._set(scenario_2)
+    _ScenarioManager._repository._save(scenario_1)
+    _ScenarioManager._repository._save(scenario_2)
     with pytest.raises(ModelNotFound):
         _SequenceManager._delete(SequenceId(sequence_id))
 
@@ -698,8 +702,8 @@ def test_delete_version():
         version="1.1",
         sequences={"sequence_1": {}, "sequence_2": {}},
     )
-    _ScenarioManager._set(scenario_1_0)
-    _ScenarioManager._set(scenario_1_1)
+    _ScenarioManager._repository._save(scenario_1_0)
+    _ScenarioManager._repository._save(scenario_1_1)
 
     _VersionManager._set_experiment_version("1.1")
     assert len(_ScenarioManager._get_all()) == 1
@@ -727,7 +731,7 @@ def test_delete_version():
 
 def test_exists():
     scenario = Scenario("scenario", [], {}, scenario_id="SCENARIO_scenario", sequences={"sequence": {}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
     assert len(_ScenarioManager._get_all()) == 1
     assert len(_SequenceManager._get_all()) == 1
     assert not _SequenceManager._exists("SEQUENCE_sequence_not_exist_SCENARIO_scenario")
@@ -743,7 +747,7 @@ def test_hard_delete_one_single_sequence_with_scenario_data_nodes():
 
     tasks = _TaskManager._bulk_get_or_create([task_config])
     scenario = Scenario("scenario", tasks, {}, sequences={"sequence": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["sequence"]
     sequence.submit()
@@ -768,7 +772,7 @@ def test_hard_delete_one_single_sequence_with_cycle_data_nodes():
 
     tasks = _TaskManager._bulk_get_or_create([task_config])
     scenario = Scenario("scenario", tasks, {}, sequences={"sequence": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
 
     sequence = scenario.sequences["sequence"]
     sequence.submit()
@@ -798,8 +802,8 @@ def test_hard_delete_shared_entities():
 
     scenario_1 = Scenario("scenario_1", tasks_scenario_1, {}, sequences={"sequence": {"tasks": tasks_scenario_1}})
     scenario_2 = Scenario("scenario_2", tasks_scenario_2, {}, sequences={"sequence": {"tasks": tasks_scenario_2}})
-    _ScenarioManager._set(scenario_1)
-    _ScenarioManager._set(scenario_2)
+    _ScenarioManager._repository._save(scenario_1)
+    _ScenarioManager._repository._save(scenario_2)
     sequence_1 = scenario_1.sequences["sequence"]
     sequence_2 = scenario_2.sequences["sequence"]
 
@@ -833,7 +837,7 @@ def test_submit_task_with_input_dn_wrong_file_path(caplog):
 
     tasks = _TaskManager._bulk_get_or_create([task_cfg, task_2_cfg])
     scenario = Scenario("scenario", tasks, {}, sequences={"sequence": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
     sequence = scenario.sequences["sequence"]
 
     pip_manager = _SequenceManagerFactory._build_manager()
@@ -865,7 +869,7 @@ def test_submit_task_with_one_input_dn_wrong_file_path(caplog):
 
     tasks = _TaskManager._bulk_get_or_create([task_cfg, task_2_cfg])
     scenario = Scenario("scenario", tasks, {}, sequences={"sequence": {"tasks": tasks}})
-    _ScenarioManager._set(scenario)
+    _ScenarioManager._repository._save(scenario)
     sequence = scenario.sequences["sequence"]
 
     pip_manager = _SequenceManagerFactory._build_manager()
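
One invariant runs through this whole file: sequences are never saved on their own. They live inside their owning scenario, so the tests persist the scenario (now via `_repository._save()`) and materialize sequences with `add_sequences()`; calling `_SequenceManager._update()` on a sequence whose scenario does not exist raises `SequenceBelongsToNonExistingScenario`, as the first test asserts. Condensed from `__init()` and `test_save_and_get()`, with imports as in the test module:

```python
# Condensed from __init() and test_save_and_get() above.
input_dn = InMemoryDataNode("foo", Scope.SCENARIO)
output_dn = InMemoryDataNode("foo", Scope.SCENARIO)
task = Task("task", {}, print, [input_dn], [output_dn], TaskId("Task_task_id"))
_TaskManager._create(task)

scenario = Scenario("scenario", {task}, {}, set())
_ScenarioManager._repository._save(scenario)

scenario.add_sequences({"p1": [task]})  # sequences persist through the owning scenario
sequence = scenario.sequences["p1"]
```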

+ 18 - 17
tests/core/submission/test_submission.py

@@ -29,7 +29,7 @@ def test_submission_equals(submission):
     submission_manager = _SubmissionManagerFactory()._build_manager()
 
     submission_id = submission.id
-    submission_manager._set(submission)
+    submission_manager._repository._save(submission)
 
     # To test if instance is same type
     task = Task("task", {}, print, [], [], submission_id)
@@ -122,6 +122,7 @@ def __test_update_submission_status(job_ids, expected_submission_status):
     }
 
     submission = Submission("submission_id", "ENTITY_TYPE", "entity_config_id")
+    _SubmissionManagerFactory._build_manager()._repository._save(submission)
     submission.jobs = [jobs[job_id] for job_id in job_ids]
     for job_id in job_ids:
         job = jobs[job_id]
@@ -279,16 +280,16 @@ def test_update_submission_status_with_wrong_case_abandoned_without_cancel_or_fa
     __test_update_submission_status(job_ids, expected_submission_status)
 
 
-def test_auto_set_and_reload():
+def test_auto_update_and_reload():
     task = Task(config_id="name_1", properties={}, function=print, id=TaskId("task_1"))
     submission_1 = Submission(task.id, task._ID_PREFIX, task.config_id, properties={})
     job_1 = Job("job_1", task, submission_1.id, submission_1.entity_id)
     job_2 = Job("job_2", task, submission_1.id, submission_1.entity_id)
 
-    _TaskManagerFactory._build_manager()._set(task)
-    _SubmissionManagerFactory._build_manager()._set(submission_1)
-    _JobManagerFactory._build_manager()._set(job_1)
-    _JobManagerFactory._build_manager()._set(job_2)
+    _TaskManagerFactory._build_manager()._repository._save(task)
+    _SubmissionManagerFactory._build_manager()._repository._save(submission_1)
+    _JobManagerFactory._build_manager()._repository._save(job_1)
+    _JobManagerFactory._build_manager()._repository._save(job_2)
 
     submission_2 = _SubmissionManagerFactory._build_manager()._get(submission_1)
 
@@ -369,12 +370,12 @@ def test_auto_set_and_reload():
     assert submission_2.submission_status == SubmissionStatus.PENDING
 
 
-def test_auto_set_and_reload_properties():
+def test_auto_update_and_reload_properties():
     task = Task(config_id="name_1", properties={}, function=print, id=TaskId("task_1"))
     submission_1 = Submission(task.id, task._ID_PREFIX, task.config_id, properties={})
 
-    _TaskManagerFactory._build_manager()._set(task)
-    _SubmissionManagerFactory._build_manager()._set(submission_1)
+    _TaskManagerFactory._build_manager()._repository._save(task)
+    _SubmissionManagerFactory._build_manager()._repository._save(submission_1)
 
     submission_2 = _SubmissionManagerFactory._build_manager()._get(submission_1)
 
@@ -475,7 +476,7 @@ def test_update_submission_status_with_single_job_completed(job_statuses, expect
 
     job = MockJob("job_id", Status.SUBMITTED)
     submission = Submission("submission_id", "ENTITY_TYPE", "entity_config_id")
-    submission_manager._set(submission)
+    submission_manager._repository._save(submission)
 
     assert submission.submission_status == SubmissionStatus.SUBMITTED
 
@@ -490,7 +491,7 @@ def __test_update_submission_status_with_two_jobs(job_ids, job_statuses, expecte
 
     jobs = {job_id: MockJob(job_id, Status.SUBMITTED) for job_id in job_ids}
     submission = Submission("submission_id", "ENTITY_TYPE", "entity_config_id")
-    submission_manager._set(submission)
+    submission_manager._repository._save(submission)
 
     assert submission.submission_status == SubmissionStatus.SUBMITTED
 
@@ -870,7 +871,7 @@ def test_is_finished():
     submission_manager = _SubmissionManagerFactory._build_manager()
 
     submission = Submission("entity_id", "entity_type", "entity_config_id", "submission_id")
-    submission_manager._set(submission)
+    submission_manager._repository._save(submission)
 
     assert len(submission_manager._get_all()) == 1
 
@@ -912,13 +913,13 @@ def test_execution_duration():
     job_1 = Job("job_1", task, submission.id, submission.entity_id)
     job_2 = Job("job_2", task, submission.id, submission.entity_id)
 
-    _TaskManagerFactory._build_manager()._set(task)
-    _SubmissionManagerFactory._build_manager()._set(submission)
-    _JobManagerFactory._build_manager()._set(job_1)
-    _JobManagerFactory._build_manager()._set(job_2)
+    _TaskManagerFactory._build_manager()._repository._save(task)
+    _SubmissionManagerFactory._build_manager()._repository._save(submission)
+    _JobManagerFactory._build_manager()._repository._save(job_1)
+    _JobManagerFactory._build_manager()._repository._save(job_2)
 
     submission.jobs = [job_1, job_2]
-    _SubmissionManagerFactory._build_manager()._set(submission)
+    _SubmissionManagerFactory._build_manager()._update(submission)
 
     with freezegun.freeze_time("2024-09-25 13:30:35"):
         job_1.running()
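
The same save-versus-update split, made explicit on a single entity: the submission is saved once through the repository, and the later `jobs` assignment is pushed with `_update()` because the entity already exists. Condensed from `test_execution_duration()`:

```python
# Condensed from test_execution_duration(); `submission`, `job_1` and
# `job_2` are the objects built at the top of the test.
manager = _SubmissionManagerFactory._build_manager()
manager._repository._save(submission)  # first-time persistence

submission.jobs = [job_1, job_2]
manager._update(submission)            # the submission exists: push the change
```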

+ 12 - 9
tests/core/submission/test_submission_manager.py

@@ -62,7 +62,7 @@ def test_get_all_submission():
     submission_manager = _SubmissionManagerFactory._build_manager()
     version_manager = _VersionManagerFactory._build_manager()
 
-    submission_manager._set(
+    submission_manager._repository._save(
         Submission(
             "entity_id",
             "entity_type",
@@ -73,7 +73,7 @@ def test_get_all_submission():
     )
     for version_name in ["abc", "xyz"]:
         for i in range(10):
-            submission_manager._set(
+            submission_manager._repository._save(
                 Submission(
                     "entity_id",
                     "entity_type",
@@ -122,7 +122,7 @@ def test_delete_submission():
 
     submission = Submission("entity_id", "entity_type", "entity_config_id", "submission_id")
 
-    submission_manager._set(submission)
+    submission_manager._repository._save(submission)
 
     with pytest.raises(SubmissionNotDeletedException):
         submission_manager._delete(submission.id)
@@ -130,7 +130,9 @@ def test_delete_submission():
     submission.submission_status = SubmissionStatus.COMPLETED
 
     for i in range(10):
-        submission_manager._set(Submission("entity_id", "entity_type", "entity_config_id", f"submission_{i}"))
+        submission_manager._repository._save(
+            Submission("entity_id", "entity_type", "entity_config_id", f"submission_{i}")
+        )
 
     assert len(submission_manager._get_all()) == 11
     assert isinstance(submission_manager._get(submission.id), Submission)
@@ -147,7 +149,7 @@ def test_is_deletable():
     submission_manager = _SubmissionManagerFactory._build_manager()
 
     submission = Submission("entity_id", "entity_type", "entity_config_id", "submission_id")
-    submission_manager._set(submission)
+    submission_manager._repository._save(submission)
 
     assert len(submission_manager._get_all()) == 1
 
@@ -210,14 +212,15 @@ def test_hard_delete():
 
     task = Task("task_config_id", {}, print)
     submission = Submission(task.id, task._ID_PREFIX, task.config_id, "SUBMISSION_submission_id")
+    _SubmissionManagerFactory._build_manager()._repository._save(submission)
     job_1 = Job("JOB_job_id_1", task, submission.id, submission.entity_id)  # will be deleted with submission
     job_2 = Job("JOB_job_id_2", task, "SUBMISSION_submission_id_2", submission.entity_id)  # will not be deleted
     submission.jobs = [job_1]
 
-    task_manager._set(task)
-    submission_manager._set(submission)
-    job_manager._set(job_1)
-    job_manager._set(job_2)
+    task_manager._repository._save(task)
+    submission_manager._repository._save(submission)
+    job_manager._repository._save(job_1)
+    job_manager._repository._save(job_2)
 
     assert len(job_manager._get_all()) == 2
     assert len(submission_manager._get_all()) == 1

+ 8 - 8
tests/core/task/test_task.py

@@ -52,7 +52,7 @@ def test_task_equals(task):
     task_manager = _TaskManagerFactory()._build_manager()
 
     task_id = task.id
-    task_manager._set(task)
+    task_manager._create(task)
 
     # To test that the instance is of the same type
     dn = CSVDataNode("foo_bar", Scope.SCENARIO, task_id)
@@ -185,13 +185,13 @@ def mock_func():
     pass
 
 
-def test_auto_set_and_reload(data_node):
+def test_auto_update_and_reload(data_node):
     task_1 = Task(
         config_id="foo", properties={}, function=print, input=None, output=None, owner_id=None, skippable=False
     )
 
-    _DataManager._set(data_node)
-    _TaskManager._set(task_1)
+    _DataManager._repository._save(data_node)
+    _TaskManager._repository._save(task_1)
 
     task_2 = _TaskManager._get(task_1)
 
@@ -219,12 +219,12 @@ def test_auto_set_and_reload(data_node):
     assert task_1.parent_ids == set()
     assert task_2.parent_ids == set()
     task_1._parent_ids.update(["sc2"])
-    _TaskManager._set(task_1)
+    _TaskManager._update(task_1)
     assert task_1.parent_ids == {"sc2"}
     assert task_2.parent_ids == {"sc2"}
     task_2._parent_ids.clear()
     task_2._parent_ids.update(["sc1"])
-    _TaskManager._set(task_2)
+    _TaskManager._update(task_2)
     assert task_1.parent_ids == {"sc1"}
     assert task_2.parent_ids == {"sc1"}
 
@@ -251,12 +251,12 @@ def test_auto_set_and_reload(data_node):
     assert not task_1._is_in_context
 
 
-def test_auto_set_and_reload_properties():
+def test_auto_update_and_reload_properties():
     task_1 = Task(
         config_id="foo", properties={}, function=print, input=None, output=None, owner_id=None, skippable=False
     )
 
-    _TaskManager._set(task_1)
+    _TaskManager._repository._save(task_1)
 
     task_2 = _TaskManager._get(task_1)
 

+ 8 - 7
tests/core/task/test_task_manager.py

@@ -61,7 +61,7 @@ def test_do_not_recreate_existing_data_node():
     input_config = Config.configure_data_node("my_input", "in_memory", scope=Scope.SCENARIO)
     output_config = Config.configure_data_node("my_output", "in_memory", scope=Scope.SCENARIO)
 
-    _DataManager._create_and_set(input_config, "scenario_id", "task_id")
+    _DataManager._create(input_config, "scenario_id", "task_id")
     assert len(_DataManager._get_all()) == 1
 
     task_config = Config.configure_task("foo", print, input_config, output_config)
@@ -186,7 +186,7 @@ def test_set_and_get_task():
     assert _TaskManager._get(second_task) is None
 
     # Save one task. We expect to have only one task stored
-    _TaskManager._set(first_task)
+    _TaskManager._repository._save(first_task)
     assert len(_TaskManager._get_all()) == 1
     assert _TaskManager._get(task_id_1).id == first_task.id
     assert _TaskManager._get(first_task).id == first_task.id
@@ -194,7 +194,7 @@ def test_set_and_get_task():
     assert _TaskManager._get(second_task) is None
 
     # Save a second task. Now, we expect to have a total of two tasks stored
-    _TaskManager._set(second_task)
+    _TaskManager._repository._save(second_task)
     assert len(_TaskManager._get_all()) == 2
     assert _TaskManager._get(task_id_1).id == first_task.id
     assert _TaskManager._get(first_task).id == first_task.id
@@ -202,7 +202,7 @@ def test_set_and_get_task():
     assert _TaskManager._get(second_task).id == second_task.id
 
     # We save the first task again. We expect nothing to change
-    _TaskManager._set(first_task)
+    _TaskManager._update(first_task)
     assert len(_TaskManager._get_all()) == 2
     assert _TaskManager._get(task_id_1).id == first_task.id
     assert _TaskManager._get(first_task).id == first_task.id
@@ -211,7 +211,7 @@ def test_set_and_get_task():
 
     # We save a third task with the same id as the first one.
     # We expect the first task to be updated
-    _TaskManager._set(third_task_with_same_id_as_first_task)
+    _TaskManager._repository._save(third_task_with_same_id_as_first_task)
     assert len(_TaskManager._get_all()) == 2
     assert _TaskManager._get(task_id_1).id == third_task_with_same_id_as_first_task.id
     assert _TaskManager._get(task_id_1).config_id == third_task_with_same_id_as_first_task.config_id
@@ -226,7 +226,7 @@ def test_get_all_on_multiple_versions_environment():
     # Only version 2.0 has the task with config_id = "config_id_6"
     for version in range(1, 3):
         for i in range(5):
-            _TaskManager._set(
+            _TaskManager._repository._save(
                 Task(
                     f"config_id_{i+version}", {}, print, [], [], id=TaskId(f"id{i}_v{version}"), version=f"{version}.0"
                 )
@@ -356,7 +356,7 @@ def test_submit_task():
         with pytest.raises(NonExistingTask):
             _TaskManager._submit(task_1.id)
 
-        _TaskManager._set(task_1)
+        _TaskManager._create(task_1)
         _TaskManager._submit(task_1)
         call_ids = [call.id for call in MockOrchestrator.submit_calls]
         assert call_ids == [task_1.id]
@@ -485,6 +485,7 @@ def test_get_scenarios_by_config_id_in_multiple_versions_environment():
 def _create_task_from_config(task_config, *args, **kwargs):
     return _TaskManager._bulk_get_or_create([task_config], *args, **kwargs)[0]
 
+
 def test_can_duplicate():
     dn_config = Config.configure_pickle_data_node("dn", scope=Scope.SCENARIO)
     task_config = Config.configure_task("task_1", print, [dn_config])
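
One rename in this file goes beyond the `_set()` removal: `_DataManager._create_and_set()` is collapsed into `_create()`, keeping the positional call shape these tests exercise (a data node config followed by what appear to be an owner id and a parent id). A minimal sketch:

```python
# Sketch of the renamed entry point; argument roles are inferred from the
# call sites in these tests, not from a documented signature.
input_config = Config.configure_data_node("my_input", "in_memory", scope=Scope.SCENARIO)
_DataManager._create(input_config, "scenario_id", "task_id")
assert len(_DataManager._get_all()) == 1
```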

+ 3 - 3
tests/core/task/test_task_model.py

@@ -71,7 +71,7 @@ def test_skippable_compatibility_with_no_output():
 
 def test_skippable_compatibility_with_one_output():
     manager = _DataManagerFactory._build_manager()
-    manager._set(InMemoryDataNode("cfg_id", Scope.SCENARIO, id="dn_id"))
+    manager._repository._save(InMemoryDataNode("cfg_id", Scope.SCENARIO, id="dn_id"))
 
     model = _TaskModel.from_dict(
         {
@@ -92,8 +92,8 @@ def test_skippable_compatibility_with_one_output():
 
 def test_skippable_compatibility_with_many_outputs():
     manager = _DataManagerFactory._build_manager()
-    manager._set(InMemoryDataNode("cfg_id", Scope.SCENARIO, id="dn_id"))
-    manager._set(InMemoryDataNode("cfg_id_2", Scope.SCENARIO, id="dn_2_id"))
+    manager._repository._save(InMemoryDataNode("cfg_id", Scope.SCENARIO, id="dn_id"))
+    manager._repository._save(InMemoryDataNode("cfg_id_2", Scope.SCENARIO, id="dn_2_id"))
     model = _TaskModel.from_dict(
         {
             "id": "id",

+ 31 - 31
tests/core/test_taipy.py

@@ -54,24 +54,24 @@ def cb(s, j):
 
 
 class TestTaipy:
-    def test_set(self, scenario, cycle, sequence, data_node, task, submission):
-        with mock.patch("taipy.core.data._data_manager._DataManager._set") as mck:
-            tp.set(data_node)
+    def test_update(self, scenario, cycle, sequence, data_node, task, submission):
+        with mock.patch("taipy.core.data._data_manager._DataManager._update") as mck:
+            tp.update(data_node)
             mck.assert_called_once_with(data_node)
-        with mock.patch("taipy.core.task._task_manager._TaskManager._set") as mck:
-            tp.set(task)
+        with mock.patch("taipy.core.task._task_manager._TaskManager._update") as mck:
+            tp.update(task)
             mck.assert_called_once_with(task)
-        with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._set") as mck:
-            tp.set(sequence)
+        with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._update") as mck:
+            tp.update(sequence)
             mck.assert_called_once_with(sequence)
-        with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._set") as mck:
-            tp.set(scenario)
+        with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._update") as mck:
+            tp.update(scenario)
             mck.assert_called_once_with(scenario)
-        with mock.patch("taipy.core.cycle._cycle_manager._CycleManager._set") as mck:
-            tp.set(cycle)
+        with mock.patch("taipy.core.cycle._cycle_manager._CycleManager._update") as mck:
+            tp.update(cycle)
             mck.assert_called_once_with(cycle)
-        with mock.patch("taipy.core.submission._submission_manager._SubmissionManager._set") as mck:
-            tp.set(submission)
+        with mock.patch("taipy.core.submission._submission_manager._SubmissionManager._update") as mck:
+            tp.update(submission)
             mck.assert_called_once_with(submission)
 
     def test_is_editable_is_called(self, cycle, job, data_node):
@@ -150,12 +150,12 @@ class TestTaipy:
         job = Job(JobId("job_id"), task, "submit_id", scenario.id)
         dn = PickleDataNode(config_id="data_node_config_id", scope=Scope.SCENARIO)
         submission = Submission(scenario.id, scenario._ID_PREFIX, scenario.config_id, "submission_id")
-        _CycleManager._set(cycle)
-        _ScenarioManager._set(scenario)
-        _TaskManager._set(task)
-        _JobManager._set(job)
-        _DataManager._set(dn)
-        _SubmissionManager._set(submission)
+        _CycleManager._repository._save(cycle)
+        _ScenarioManager._repository._save(scenario)
+        _TaskManager._repository._save(task)
+        _JobManager._repository._save(job)
+        _DataManager._repository._save(dn)
+        _SubmissionManager._repository._save(submission)
         sequence = scenario.sequences["sequence"]
 
         assert tp.is_editable(scenario)
@@ -242,12 +242,12 @@ class TestTaipy:
         job = Job(JobId("a_job_id"), task, "submit_id", scenario.id)
         dn = PickleDataNode(config_id="a_data_node_config_id", scope=Scope.SCENARIO)
         submission = Submission(scenario.id, scenario._ID_PREFIX, scenario.config_id, "submission_id")
-        _CycleManager._set(cycle)
-        _ScenarioManager._set(scenario)
-        _TaskManager._set(task)
-        _JobManager._set(job)
-        _DataManager._set(dn)
-        _SubmissionManager._set(submission)
+        _CycleManager._repository._save(cycle)
+        _ScenarioManager._repository._save(scenario)
+        _TaskManager._repository._save(task)
+        _JobManager._repository._save(job)
+        _DataManager._repository._save(dn)
+        _SubmissionManager._repository._save(submission)
         sequence = scenario.sequences["sequence"]
 
         assert tp.is_readable(scenario)
@@ -297,11 +297,11 @@ class TestTaipy:
         job = Job(JobId("job_id"), task, "submit_id", ScenarioId(scenario.id))
         dn = PickleDataNode("data_node_config_id", Scope.SCENARIO)
 
-        _CycleManager._set(cycle)
-        _ScenarioManager._set(scenario)
-        _TaskManager._set(task)
-        _JobManager._set(job)
-        _DataManager._set(dn)
+        _CycleManager._repository._save(cycle)
+        _ScenarioManager._repository._save(scenario)
+        _TaskManager._repository._save(task)
+        _JobManager._repository._save(job)
+        _DataManager._repository._save(dn)
         sequence = scenario.sequences["sequence"]
 
         assert tp.is_submittable(scenario)
@@ -698,7 +698,7 @@ class TestTaipy:
     def test_create_global_data_node(self):
         dn_cfg_global = DataNodeConfig("id", "pickle", Scope.GLOBAL)
         dn_cfg_scenario = DataNodeConfig("id", "pickle", Scope.SCENARIO)
-        with mock.patch("taipy.core.data._data_manager._DataManager._create_and_set") as dn_create_mock:
+        with mock.patch("taipy.core.data._data_manager._DataManager._create") as dn_create_mock:
             with mock.patch("taipy.core.orchestrator.Orchestrator._manage_version_and_block_config") as mv_mock:
                 dn = tp.create_global_data_node(dn_cfg_global)
                 dn_create_mock.assert_called_once_with(dn_cfg_global, None, None)
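
The public surface follows the internal rename: `tp.set()` becomes `tp.update()`, and `test_update` checks that it dispatches to the `_update()` of whichever manager owns the entity. A minimal usage sketch, where `scenario` stands for any stored scenario:

```python
# Minimal sketch of the renamed public entry point.
import taipy as tp

scenario.properties["status"] = "reviewed"
tp.update(scenario)  # replaces the removed tp.set(scenario)
```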

+ 5 - 7
tests/gui_core/test_context_is_editable.py

@@ -56,10 +56,10 @@ class MockState:
 class TestGuiCoreContext_is_editable:
     @pytest.fixture(scope="class", autouse=True)
     def set_entity(self):
-        _ScenarioManagerFactory._build_manager()._set(a_scenario)
-        _TaskManagerFactory._build_manager()._set(a_task)
-        _JobManagerFactory._build_manager()._set(a_job)
-        _DataManagerFactory._build_manager()._set(a_datanode)
+        _ScenarioManagerFactory._build_manager()._repository._save(a_scenario)
+        _TaskManagerFactory._build_manager()._repository._save(a_task)
+        _JobManagerFactory._build_manager()._repository._save(a_job)
+        _DataManagerFactory._build_manager()._repository._save(a_datanode)
 
     def test_crud_scenario(self):
         with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
@@ -254,9 +254,7 @@ class TestGuiCoreContext_is_editable:
                 MockState(assign=assign),
                 "",
                 {
-                    "args": [{
-                        "id": a_datanode.id,
-                        "error_id": "error_var"}],
+                    "args": [{"id": a_datanode.id, "error_id": "error_var"}],
                 },
             )
             assign.assert_called()

+ 4 - 4
tests/gui_core/test_context_is_promotable.py

@@ -55,10 +55,10 @@ class MockState:
 class TestGuiCoreContext_is_promotable:
     @pytest.fixture(scope="class", autouse=True)
     def set_entity(self):
-        _ScenarioManagerFactory._build_manager()._set(a_scenario)
-        _TaskManagerFactory._build_manager()._set(a_task)
-        _JobManagerFactory._build_manager()._set(a_job)
-        _DataManagerFactory._build_manager()._set(a_datanode)
+        _ScenarioManagerFactory._build_manager()._repository._save(a_scenario)
+        _TaskManagerFactory._build_manager()._repository._save(a_task)
+        _JobManagerFactory._build_manager()._repository._save(a_job)
+        _DataManagerFactory._build_manager()._repository._save(a_datanode)
 
     def test_edit_entity(self):
         with (

+ 128 - 92
tests/gui_core/test_context_is_readable.py

@@ -69,10 +69,12 @@ def mock_core_get(entity_id):
 
 class MockState(State):
     def __init__(self, **kwargs) -> None:
-        self.assign = t.cast(t.Callable, kwargs.get("assign")) # type: ignore[method-assign]
+        self.assign = t.cast(t.Callable, kwargs.get("assign"))  # type: ignore[method-assign]
         self.gui = t.cast(Gui, kwargs.get("gui"))
+
     def get_gui(self):
         return self.gui
+
     def broadcast(self, name: str, value: t.Any):
         pass
 
@@ -80,12 +82,12 @@ class MockState(State):
 class TestGuiCoreContext_is_readable:
     @pytest.fixture(scope="class", autouse=True)
     def set_entity(self):
-        _CycleManagerFactory._build_manager()._set(a_cycle)
-        _ScenarioManagerFactory._build_manager()._set(a_scenario)
-        _TaskManagerFactory._build_manager()._set(a_task)
-        _JobManagerFactory._build_manager()._set(a_job)
-        _DataManagerFactory._build_manager()._set(a_datanode)
-        _SubmissionManagerFactory._build_manager()._set(a_submission)
+        _CycleManagerFactory._build_manager()._repository._save(a_cycle)
+        _ScenarioManagerFactory._build_manager()._repository._save(a_scenario)
+        _TaskManagerFactory._build_manager()._repository._save(a_task)
+        _JobManagerFactory._build_manager()._repository._save(a_job)
+        _DataManagerFactory._build_manager()._repository._save(a_datanode)
+        _SubmissionManagerFactory._build_manager()._repository._save(a_submission)
 
     def test_scenario_adapter(self):
         with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
@@ -128,26 +130,9 @@ class TestGuiCoreContext_is_readable:
             gui_core_context.crud_scenario(
                 MockState(assign=assign, gui=gui_core_context.gui),
                 "",
-                t.cast(dict, {
-                    "args": [
-                        "",
-                        "",
-                        "",
-                        True,
-                        False,
-                        {"name": "name", "id": a_scenario.id},
-                    ],
-                    "error_id": "error_var",
-                }),
-            )
-            assign.assert_not_called()
-
-            with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_readable_false):
-                assign.reset_mock()
-                gui_core_context.crud_scenario(
-                    MockState(assign=assign),
-                    "",
-                    t.cast(dict, {
+                t.cast(
+                    dict,
+                    {
                         "args": [
                             "",
                             "",
@@ -157,7 +142,30 @@ class TestGuiCoreContext_is_readable:
                             {"name": "name", "id": a_scenario.id},
                         ],
                         "error_id": "error_var",
-                    }),
+                    },
+                ),
+            )
+            assign.assert_not_called()
+
+            with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_readable_false):
+                assign.reset_mock()
+                gui_core_context.crud_scenario(
+                    MockState(assign=assign),
+                    "",
+                    t.cast(
+                        dict,
+                        {
+                            "args": [
+                                "",
+                                "",
+                                "",
+                                True,
+                                False,
+                                {"name": "name", "id": a_scenario.id},
+                            ],
+                            "error_id": "error_var",
+                        },
+                    ),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
@@ -170,12 +178,15 @@ class TestGuiCoreContext_is_readable:
             gui_core_context.edit_entity(
                 MockState(assign=assign),
                 "",
-                t.cast(dict, {
-                    "args": [
-                        {"name": "name", "id": a_scenario.id},
-                    ],
-                    "error_id": "error_var",
-                }),
+                t.cast(
+                    dict,
+                    {
+                        "args": [
+                            {"name": "name", "id": a_scenario.id},
+                        ],
+                        "error_id": "error_var",
+                    },
+                ),
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "error_var"
@@ -186,12 +197,15 @@ class TestGuiCoreContext_is_readable:
                 gui_core_context.edit_entity(
                     MockState(assign=assign),
                     "",
-                    t.cast(dict, {
-                        "args": [
-                            {"name": "name", "id": a_scenario.id},
-                        ],
-                        "error_id": "error_var",
-                    }),
+                    t.cast(
+                        dict,
+                        {
+                            "args": [
+                                {"name": "name", "id": a_scenario.id},
+                            ],
+                            "error_id": "error_var",
+                        },
+                    ),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
@@ -247,12 +261,15 @@ class TestGuiCoreContext_is_readable:
             gui_core_context.act_on_jobs(
                 MockState(assign=assign),
                 "",
-                t.cast(dict, {
-                    "args": [
-                        {"id": [a_job.id], "action": "delete"},
-                    ],
-                    "error_id": "error_var",
-                }),
+                t.cast(
+                    dict,
+                    {
+                        "args": [
+                            {"id": [a_job.id], "action": "delete"},
+                        ],
+                        "error_id": "error_var",
+                    },
+                ),
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "error_var"
@@ -262,12 +279,15 @@ class TestGuiCoreContext_is_readable:
             gui_core_context.act_on_jobs(
                 MockState(assign=assign),
                 "",
-                t.cast(dict, {
-                    "args": [
-                        {"id": [a_job.id], "action": "cancel"},
-                    ],
-                    "error_id": "error_var",
-                }),
+                t.cast(
+                    dict,
+                    {
+                        "args": [
+                            {"id": [a_job.id], "action": "cancel"},
+                        ],
+                        "error_id": "error_var",
+                    },
+                ),
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "error_var"
@@ -278,12 +298,15 @@ class TestGuiCoreContext_is_readable:
                 gui_core_context.act_on_jobs(
                     MockState(assign=assign),
                     "",
-                    t.cast(dict, {
-                        "args": [
-                            {"id": [a_job.id], "action": "delete"},
-                        ],
-                        "error_id": "error_var",
-                    }),
+                    t.cast(
+                        dict,
+                        {
+                            "args": [
+                                {"id": [a_job.id], "action": "delete"},
+                            ],
+                            "error_id": "error_var",
+                        },
+                    ),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
@@ -293,12 +316,15 @@ class TestGuiCoreContext_is_readable:
                 gui_core_context.act_on_jobs(
                     MockState(assign=assign),
                     "",
-                    t.cast(dict, {
-                        "args": [
-                            {"id": [a_job.id], "action": "cancel"},
-                        ],
-                        "error_id": "error_var",
-                    }),
+                    t.cast(
+                        dict,
+                        {
+                            "args": [
+                                {"id": [a_job.id], "action": "cancel"},
+                            ],
+                            "error_id": "error_var",
+                        },
+                    ),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
@@ -311,12 +337,15 @@ class TestGuiCoreContext_is_readable:
             gui_core_context.edit_data_node(
                 MockState(assign=assign),
                 "",
-                t.cast(dict, {
-                    "args": [
-                        {"id": a_datanode.id},
-                    ],
-                    "error_id": "error_var",
-                }),
+                t.cast(
+                    dict,
+                    {
+                        "args": [
+                            {"id": a_datanode.id},
+                        ],
+                        "error_id": "error_var",
+                    },
+                ),
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "error_var"
@@ -327,12 +356,15 @@ class TestGuiCoreContext_is_readable:
                 gui_core_context.edit_data_node(
                     MockState(assign=assign),
                     "",
-                    t.cast(dict, {
-                        "args": [
-                            {"id": a_datanode.id},
-                        ],
-                        "error_id": "error_var",
-                    }),
+                    t.cast(
+                        dict,
+                        {
+                            "args": [
+                                {"id": a_datanode.id},
+                            ],
+                            "error_id": "error_var",
+                        },
+                    ),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
@@ -347,12 +379,15 @@ class TestGuiCoreContext_is_readable:
             gui_core_context.lock_datanode_for_edit(
                 MockState(assign=assign),
                 "",
-                t.cast(dict, {
-                    "args": [
-                        {"id": a_datanode.id},
-                    ],
-                    "error_id": "error_var",
-                }),
+                t.cast(
+                    dict,
+                    {
+                        "args": [
+                            {"id": a_datanode.id},
+                        ],
+                        "error_id": "error_var",
+                    },
+                ),
             )
             assign.assert_called_once()
             assert assign.call_args.args[0] == "error_var"
@@ -363,12 +398,15 @@ class TestGuiCoreContext_is_readable:
                 gui_core_context.lock_datanode_for_edit(
                     MockState(assign=assign),
                     "",
-                    t.cast(dict, {
-                        "args": [
-                            {"id": a_datanode.id},
-                        ],
-                        "error_id": "error_var",
-                    }),
+                    t.cast(
+                        dict,
+                        {
+                            "args": [
+                                {"id": a_datanode.id},
+                            ],
+                            "error_id": "error_var",
+                        },
+                    ),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
@@ -392,9 +430,7 @@ class TestGuiCoreContext_is_readable:
             gui_core_context = _GuiCoreContext(mockGui)
             assign = Mock()
             gui_core_context.update_data(
-                MockState(assign=assign),
-                "",
-                t.cast(dict, {"args": [{"id": a_datanode.id, "error_id": "error_var"}]})
+                MockState(assign=assign), "", t.cast(dict, {"args": [{"id": a_datanode.id, "error_id": "error_var"}]})
             )
             assign.assert_called()
             assert assign.call_args_list[0].args[0] == "error_var"
@@ -405,7 +441,7 @@ class TestGuiCoreContext_is_readable:
                 gui_core_context.update_data(
                     MockState(assign=assign),
                     "",
-                    t.cast(dict, {"args": [{"id": a_datanode.id, "error_id": "error_var"}]})
+                    t.cast(dict, {"args": [{"id": a_datanode.id, "error_id": "error_var"}]}),
                 )
                 assign.assert_called_once()
                 assert assign.call_args.args[0] == "error_var"
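
Note: the `t.cast(dict, {...})` reshaping in the hunks above is purely a formatter change. `t.cast` has no runtime effect; it only tells the type checker to treat the literal as a plain dict. A minimal standalone sketch of the payload construction, with a hypothetical entity id:

import typing as t

# t.cast(dict, ...) only affects static type checking; at runtime the
# literal below is an ordinary dict. The multi-line layout matches what
# a Black-style formatter produces once the call no longer fits on one line.
payload = t.cast(
    dict,
    {
        "args": [
            {"id": "JOB_example_id", "action": "cancel"},  # hypothetical id
        ],
        "error_id": "error_var",
    },
)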

+ 26 - 30
tests/gui_core/test_context_on_file_action.py

@@ -22,9 +22,8 @@ from taipy.core.data._file_datanode_mixin import _FileDataNodeMixin
 from taipy.core.reason import Reason, ReasonCollection
 from taipy.gui_core._context import _GuiCoreContext
 
-dn = PickleDataNode("dn_config_id",
-                    scope = Scope.GLOBAL,
-                    properties={"default_path": "pa/th"})
+dn = PickleDataNode("dn_config_id", scope=Scope.GLOBAL, properties={"default_path": "pa/th"})
+
 
 def core_get(entity_id):
     if entity_id == dn.id:
@@ -32,7 +31,7 @@ def core_get(entity_id):
     return None
 
 
-def not_downloadable ():
+def not_downloadable():
     return ReasonCollection()._add_reason(dn.id, Reason("foo"))
 
 
@@ -56,11 +55,11 @@ def check_fails(**kwargs):
     raise Exception("Failed")
 
 
-def upload_fails (a, b, editor_id, comment):
+def upload_fails(a, b, editor_id, comment):
     return ReasonCollection()._add_reason(dn.id, Reason("bar"))
 
 
-def download_fails (a, b, editor_id, comment):
+def download_fails(a, b, editor_id, comment):
     return ReasonCollection()._add_reason(dn.id, Reason("bar"))
 
 
@@ -70,10 +69,9 @@ class MockState:
 
 
 class TestGuiCoreContext_on_file_action:
-
     @pytest.fixture(scope="class", autouse=True)
     def set_entities(self):
-        _DataManagerFactory._build_manager()._set(dn)
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
     def test_does_not_fail_if_wrong_args(self):
         gui_core_context = _GuiCoreContext(Mock(Gui))
@@ -112,11 +110,7 @@ class TestGuiCoreContext_on_file_action:
                         payload={"args": [{"id": dn.id, "error_id": "error_var", "path": "pa/th"}]},
                     )
                     mock_core_get.assert_called_once_with(dn.id)
-                    mock_upload.assert_called_once_with(
-                        "pa/th",
-                        None,
-                        editor_id="a_client_id",
-                        comment=None)
+                    mock_upload.assert_called_once_with("pa/th", None, editor_id="a_client_id", comment=None)
                     assign.assert_not_called()
 
     def test_upload_file_with_checker(self):
@@ -125,21 +119,25 @@ class TestGuiCoreContext_on_file_action:
                 with patch.object(_FileDataNodeMixin, "_upload") as mock_upload:
                     mockGui = Mock(Gui)
                     mockGui._get_client_id = lambda: "a_client_id"
-                    mockGui._get_user_function = lambda _ : _
+                    mockGui._get_user_function = lambda _: _
                     gui_core_context = _GuiCoreContext(mockGui)
                     assign = Mock()
                     gui_core_context.on_file_action(
                         state=MockState(assign=assign),
                         id="",
-                        payload={"args": [
-                            {"id": dn.id, "error_id": "error_var", "path": "pa/th", "upload_check": mock_checker}]},
+                        payload={
+                            "args": [
+                                {"id": dn.id, "error_id": "error_var", "path": "pa/th", "upload_check": mock_checker}
+                            ]
+                        },
                     )
                     mock_core_get.assert_called_once_with(dn.id)
                     mock_upload.assert_called_once_with(
                         "pa/th",
                         t.cast(t.Callable[[str, t.Any], bool], mock_checker),
                         editor_id="a_client_id",
-                        comment=None)
+                        comment=None,
+                    )
                     assign.assert_not_called()
 
     def test_upload_file_with_failing_checker(self):
@@ -148,21 +146,25 @@ class TestGuiCoreContext_on_file_action:
                 with patch.object(_FileDataNodeMixin, "_upload", side_effect=upload_fails) as mock_upload:
                     mockGui = Mock(Gui)
                     mockGui._get_client_id = lambda: "a_client_id"
-                    mockGui._get_user_function = lambda _ : _
+                    mockGui._get_user_function = lambda _: _
                     gui_core_context = _GuiCoreContext(mockGui)
                     assign = Mock()
                     gui_core_context.on_file_action(
                         state=MockState(assign=assign),
                         id="",
-                        payload={"args": [
-                            {"id": dn.id, "error_id": "error_var", "path": "pa/th", "upload_check": check_fails}]},
+                        payload={
+                            "args": [
+                                {"id": dn.id, "error_id": "error_var", "path": "pa/th", "upload_check": check_fails}
+                            ]
+                        },
                     )
                     mock_core_get.assert_called_once_with(dn.id)
                     mock_upload.assert_called_once_with(
                         "pa/th",
                         t.cast(t.Callable[[str, t.Any], bool], check_fails),
                         editor_id="a_client_id",
-                        comment=None)
+                        comment=None,
+                    )
                     assign.assert_called_once_with("error_var", "Data unavailable: bar.")
 
     def test_download_file_not_downloadable(self):
@@ -171,16 +173,13 @@ class TestGuiCoreContext_on_file_action:
                 with patch.object(_FileDataNodeMixin, "_get_downloadable_path") as mock_download:
                     mockGui = Mock(Gui)
                     mockGui._get_client_id = lambda: "a_client_id"
-                    mockGui._get_user_function = lambda _ : _
+                    mockGui._get_user_function = lambda _: _
                     gui_core_context = _GuiCoreContext(mockGui)
                     assign = Mock()
                     gui_core_context.on_file_action(
                         state=MockState(assign=assign),
                         id="",
-                        payload={"args": [
-                            {"id": dn.id,
-                             "action": "export",
-                             "error_id": "error_var"}]},
+                        payload={"args": [{"id": dn.id, "action": "export", "error_id": "error_var"}]},
                     )
                     mock_core_get.assert_called_once_with(dn.id)
                     mock_download.assert_not_called()
@@ -198,10 +197,7 @@ class TestGuiCoreContext_on_file_action:
                     gui_core_context.on_file_action(
                         state=MockState(assign=assign),
                         id="",
-                        payload={"args": [
-                            {"id": dn.id,
-                             "action": "export",
-                             "error_id": "error_var"}]},
+                        payload={"args": [{"id": dn.id, "action": "export", "error_id": "error_var"}]},
                     )
                     mock_core_get.assert_called_once_with(dn.id)
                     mock_download.assert_called_once()
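
Note: the tests cast the checker to `t.Callable[[str, t.Any], bool]`, so an upload callback takes two arguments and returns a boolean. A minimal sketch of such a checker; the argument order (file name first) and the `.csv` rule are assumptions for illustration, and how a False verdict is handled is up to `_FileDataNodeMixin._upload`:

import typing as t

def upload_check(file_name: str, content: t.Any) -> bool:
    # Illustrative rule only: accept CSV uploads with a non-empty payload.
    return file_name.endswith(".csv") and content is not None

checker: t.Callable[[str, t.Any], bool] = upload_check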

+ 18 - 19
tests/gui_core/test_context_tabular_data_edit.py

@@ -19,7 +19,7 @@ from taipy.core.data.pickle import PickleDataNode
 from taipy.core.reason import Reason, ReasonCollection
 from taipy.gui_core._context import _GuiCoreContext
 
-dn = PickleDataNode("dn_config_id", scope = Scope.GLOBAL)
+dn = PickleDataNode("dn_config_id", scope=Scope.GLOBAL)
 
 
 def core_get(entity_id):
@@ -35,6 +35,7 @@ def is_false(entity_id):
 def is_true(entity_id):
     return True
 
+
 def fails(**kwargs):
     raise Exception("Failed")
 
@@ -45,9 +46,8 @@ class MockState:
 
 
 class TestGuiCoreContext_update_data:
-
     def test_do_not_edit_tabular_data_if_not_readable(self):
-        _DataManagerFactory._build_manager()._set(dn)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         with patch("taipy.gui_core._context.is_readable", side_effect=is_false):
             with patch("taipy.gui_core._context.core_get", side_effect=core_get) as mock_core_get:
                 with patch.object(DataNode, "write") as mock_write:
@@ -58,7 +58,7 @@ class TestGuiCoreContext_update_data:
                     assign.assert_called_once_with("error_var", f"Data node {dn.id} is not readable: foo.")
 
     def test_do_not_edit_tabular_data_if_not_editable(self):
-        _DataManagerFactory._build_manager()._set(dn)
+        _DataManagerFactory._build_manager()._repository._save(dn)
         with patch("taipy.gui_core._context.is_readable", side_effect=is_true):
             with patch("taipy.gui_core._context.is_editable", side_effect=is_false):
                 with patch("taipy.gui_core._context.core_get", side_effect=core_get) as mock_core_get:
@@ -122,7 +122,6 @@ class TestGuiCoreContext_update_data:
             with patch("taipy.gui_core._context.is_editable", side_effect=is_true):
                 with patch("taipy.gui_core._context.core_get", side_effect=core_get) as mock_core_get:
                     with patch.object(DataNode, "write") as mock_write:
-
                         assign = self.__call_update_data(col, idx, new_value)
 
                         mock_core_get.assert_called_once_with(dn.id)
@@ -233,8 +232,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating data node tabular value. list assignment index out of range")
+                            "error_var", "Error updating data node tabular value. list assignment index out of range"
+                        )
 
     def test_edit_dict_wrong_col(self):
         data = {"a": [1, 2, 3], "b": [4, 5, 6]}
@@ -250,8 +249,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating Data node: dict values must be list or tuple.")
+                            "error_var", "Error updating Data node: dict values must be list or tuple."
+                        )
 
     def test_edit_dict_of_tuples(self):
         data = {"a": (1, 2, 3), "b": (4, 5, 6)}
@@ -283,8 +282,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating data node tabular value. list assignment index out of range")
+                            "error_var", "Error updating data node tabular value. list assignment index out of range"
+                        )
 
     def test_edit_dict_of_tuples_wrong_col(self):
         data = {"a": (1, 2, 3), "b": (4, 5, 6)}
@@ -300,8 +299,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating Data node: dict values must be list or tuple.")
+                            "error_var", "Error updating Data node: dict values must be list or tuple."
+                        )
 
     def test_edit_wrong_dict(self):
         data = {"a": 1, "b": 2}
@@ -317,8 +316,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating Data node: dict values must be list or tuple.")
+                            "error_var", "Error updating Data node: dict values must be list or tuple."
+                        )
 
     def test_edit_list(self):
         data = [[1, 2, 3], [4, 5, 6]]
@@ -350,8 +349,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating data node tabular value. list index out of range")
+                            "error_var", "Error updating data node tabular value. list index out of range"
+                        )
 
     def test_edit_list_wrong_col(self):
         data = [[1, 2, 3], [4, 5, 6]]
@@ -367,8 +366,8 @@ class TestGuiCoreContext_update_data:
                         mock_core_get.assert_called_once_with(dn.id)
                         mock_write.assert_not_called()
                         assign.assert_called_once_with(
-                            "error_var",
-                            "Error updating data node tabular value. list assignment index out of range")
+                            "error_var", "Error updating data node tabular value. list assignment index out of range"
+                        )
 
     def test_edit_tuple(self):
         data = ([1, 2, 3], [4, 5, 6])
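
Note: the reflowed assertions above encode the tabular-edit contract: column-oriented dicts must map each column to a list or a tuple, and row indices must be in range. A sketch of the validation the asserted messages imply; this is a hypothetical recreation, not the GUI core's actual code path:

# Scalar values instead of column sequences, as in test_edit_wrong_dict.
data = {"a": 1, "b": 2}
for col, values in data.items():
    if not isinstance(values, (list, tuple)):
        raise ValueError("Error updating Data node: dict values must be list or tuple.")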

+ 19 - 17
tests/gui_core/test_context_update_data.py

@@ -35,6 +35,7 @@ def is_false(entity_id):
 def is_true(entity_id):
     return True
 
+
 def fails(**kwargs):
     raise Exception("Failed")
 
@@ -45,10 +46,9 @@ class MockState:
 
 
 class TestGuiCoreContext_update_data:
-
     @pytest.fixture(scope="class", autouse=True)
     def set_entities(self):
-        _DataManagerFactory._build_manager()._set(dn)
+        _DataManagerFactory._build_manager()._repository._save(dn)
 
     def test_does_not_fail_if_wrong_args(self):
         gui_core_context = _GuiCoreContext(Mock(Gui))
@@ -67,7 +67,7 @@ class TestGuiCoreContext_update_data:
                     gui_core_context.update_data(
                         state=MockState(assign=assign),
                         id="",
-                        payload={"args": [{"id": dn.id,"error_id": "error_var"}]},
+                        payload={"args": [{"id": dn.id, "error_id": "error_var"}]},
                     )
                     mock_core_get.assert_not_called()
                     mock_write.assert_not_called()
@@ -85,7 +85,7 @@ class TestGuiCoreContext_update_data:
                         gui_core_context.update_data(
                             state=MockState(assign=assign),
                             id="",
-                            payload={"args": [{"id": dn.id,"error_id": "error_var"}]},
+                            payload={"args": [{"id": dn.id, "error_id": "error_var"}]},
                         )
                         mock_core_get.assert_not_called()
                         mock_write.assert_not_called()
@@ -104,17 +104,20 @@ class TestGuiCoreContext_update_data:
                             state=MockState(assign=assign),
                             id="",
                             payload={
-                                "args": [{
-                                    "id": dn.id,
-                                    "value": "data to write",
-                                    "comment": "The comment",
-                                    "error_id": "error_var"}],
+                                "args": [
+                                    {
+                                        "id": dn.id,
+                                        "value": "data to write",
+                                        "comment": "The comment",
+                                        "error_id": "error_var",
+                                    }
+                                ],
                             },
                         )
                         mock_core_get.assert_called_once_with(dn.id)
-                        mock_write.assert_called_once_with("data to write",
-                                                           editor_id="a_client_id",
-                                                           comment="The comment")
+                        mock_write.assert_called_once_with(
+                            "data to write", editor_id="a_client_id", comment="The comment"
+                        )
                         assign.assert_called_once_with("error_var", "")
 
     def test_write_date_data_with_editor_and_comment(self):
@@ -137,14 +140,13 @@ class TestGuiCoreContext_update_data:
                                         "value": "2000-01-01 00:00:00",
                                         "type": "date",
                                         "comment": "The comment",
-                                        "error_id": "error_var"
-                                    }],
+                                        "error_id": "error_var",
+                                    }
+                                ],
                             },
                         )
                         mock_core_get.assert_called_once_with(dn.id)
-                        mock_write.assert_called_once_with(date,
-                                                           editor_id="a_client_id",
-                                                           comment="The comment")
+                        mock_write.assert_called_once_with(date, editor_id="a_client_id", comment="The comment")
                         assign.assert_called_once_with("error_var", "")
 
     def test_write_int_data_with_editor_and_comment(self):
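
Note: in the date test above, the string payload value is converted to a datetime before `write()` is called with it. A sketch of that conversion, assuming the timestamp format used in the test payload; the `strptime` call is illustrative, not necessarily the production code path:

from datetime import datetime

value = "2000-01-01 00:00:00"
date = datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
# dn.write(date, editor_id="a_client_id", comment="The comment")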

+ 5 - 3
tests/rest/conftest.py

@@ -268,7 +268,7 @@ def create_cycle_list():
     manager = _CycleManager
     for i in range(10):
         c = __create_cycle(f"cycle_{i}")
-        manager._set(c)
+        manager._repository._save(c)
     return cycles
 
 
@@ -292,7 +292,7 @@ def default_cycle():
 def __create_job():
     task_manager = _TaskManager
     task = __default_task()
-    task_manager._set(task)
+    task_manager._repository._save(task)
     submit_id = f"SUBMISSION_{str(uuid.uuid4())}"
     return Job(id=JobId(f"JOB_{uuid.uuid4()}"), task=task, submit_id=submit_id, submit_entity_id=task.id)
 
@@ -308,9 +308,10 @@ def create_job_list():
     manager = _JobManager
     for _ in range(10):
         c = __create_job()
-        manager._set(c)
+        manager._repository._save(c)
     return jobs
 
+
 @pytest.fixture
 def init_orchestrator():
     def _init_orchestrator():
@@ -324,6 +325,7 @@ def init_orchestrator():
 
     return _init_orchestrator
 
+
 @pytest.fixture(scope="function", autouse=True)
 def cleanup_files(reset_configuration_singleton, inject_rest_sections, inject_core_sections):
     reset_configuration_singleton()
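
Note: the fixtures now persist entities straight through the repository (`_repository._save(...)`) instead of the removed manager API. A condensed sketch of the shared pattern, assuming the conftest's imports and helpers; `entity` stands in for any of the ten objects the loops above create:

# Build an entity with the module's helper, then write it to storage via
# the manager's repository.
manager = _CycleManager
entity = __create_cycle("cycle_0")  # one hypothetical cycle
manager._repository._save(entity)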

+ 2 - 2
tests/rest/test_sequence.py

@@ -60,7 +60,7 @@ def test_create_sequence(client, default_scenario):
     rep = client.post(sequences_url, json={"scenario_id": "SCENARIO_scenario_id", "sequence_name": "sequence"})
     assert rep.status_code == 404
 
-    _ScenarioManagerFactory._build_manager()._set(default_scenario)
+    _ScenarioManagerFactory._build_manager()._repository._save(default_scenario)
     with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._get") as config_mock:
         config_mock.return_value = default_scenario
         sequences_url = url_for("api.sequences")
@@ -91,7 +91,7 @@ def test_execute_sequence(client, default_scenario):
     rep = client.post(user_url)
     assert rep.status_code == 404
 
-    _ScenarioManagerFactory._build_manager()._set(default_scenario)
+    _ScenarioManagerFactory._build_manager()._repository._save(default_scenario)
     with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._get") as config_mock:
         config_mock.return_value = default_scenario
         sequences_url = url_for("api.sequences")
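
Note: both REST tests above share the same arrangement: the scenario is persisted through the repository, then `_ScenarioManager._get` is patched so the API resolves the same entity regardless of the id in the request. A condensed sketch of that arrangement, assuming the test module's imports and the `default_scenario` fixture:

from unittest import mock

_ScenarioManagerFactory._build_manager()._repository._save(default_scenario)
with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._get") as get_mock:
    get_mock.return_value = default_scenario
    # client.post(url_for("api.sequences"), json={...}) now resolves the scenario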