Browse source

Merge pull request #619 from Avaiga/feature/clean_test_orchetrator

Feature/clean test orchetrator
Jean-Robin 1 year ago
parent
commit
ce642bfe66
39 changed files with 2,502 additions and 1,250 deletions
  1. .github/workflows/overall-tests.yml (+1 -1)
  2. .github/workflows/partial-tests.yml (+127 -9)
  3. Pipfile (+2 -1)
  4. pytest.ini (+3 -0)
  5. taipy/core/Pipfile (+1 -1)
  6. taipy/core/_orchestrator/_dispatcher/_job_dispatcher.py (+6 -2)
  7. taipy/core/_orchestrator/_orchestrator.py (+8 -21)
  8. taipy/core/setup.py (+1 -1)
  9. tests/core/_orchestrator/_dispatcher/mock_standalone_dispatcher.py (+67 -0)
  10. tests/core/_orchestrator/_dispatcher/test_development_job_dispatcher.py (+63 -0)
  11. tests/core/_orchestrator/_dispatcher/test_dispatcher__execute_job.py (+117 -0)
  12. tests/core/_orchestrator/_dispatcher/test_dispatcher__needs_to_run.py (+103 -0)
  13. tests/core/_orchestrator/_dispatcher/test_dispatcher__update_job_status.py (+61 -0)
  14. tests/core/_orchestrator/_dispatcher/test_job_dispatcher.py (+0 -163)
  15. tests/core/_orchestrator/_dispatcher/test_standalone_job_dispatcher.py (+134 -0)
  16. tests/core/_orchestrator/_dispatcher/test_task_function_wrapper.py (+133 -0)
  17. tests/core/_orchestrator/test_orchestrator.py (+9 -886)
  18. tests/core/_orchestrator/test_orchestrator__cancel_jobs.py (+178 -0)
  19. tests/core/_orchestrator/test_orchestrator__is_blocked.py (+156 -0)
  20. tests/core/_orchestrator/test_orchestrator__lock_dn_output_and_create_job.py (+94 -0)
  21. tests/core/_orchestrator/test_orchestrator__on_status_change.py (+165 -0)
  22. tests/core/_orchestrator/test_orchestrator__orchestrate_job_to_run_or_block.py (+78 -0)
  23. tests/core/_orchestrator/test_orchestrator__submit.py (+486 -0)
  24. tests/core/_orchestrator/test_orchestrator__submit_task.py (+205 -0)
  25. tests/core/_orchestrator/test_orchestrator_factory.py (+89 -39)
  26. tests/core/cycle/test_cycle_manager.py (+9 -8)
  27. tests/core/data/test_csv_data_node.py (+28 -10)
  28. tests/core/data/test_excel_data_node.py (+94 -71)
  29. tests/core/data/test_parquet_data_node.py (+45 -10)
  30. tests/core/data/test_pickle_data_node.py (+2 -1)
  31. tests/core/data/test_sql_data_node.py (+2 -0)
  32. tests/core/data/test_sql_table_data_node.py (+2 -0)
  33. tests/core/job/test_job.py (+2 -1)
  34. tests/core/notification/test_events_published.py (+1 -1)
  35. tests/core/scenario/test_scenario_manager.py (+1 -0)
  36. tests/core/test_core.py (+1 -0)
  37. tests/core/test_core_cli.py (+21 -23)
  38. tests/core/version/test_production_version_migration.py (+6 -0)
  39. tools/packages/taipy-core/setup.requirements.txt (+1 -1)

+ 1 - 1
.github/workflows/overall-tests.yml

@@ -41,7 +41,7 @@ jobs:
         run: pipenv run playwright install chromium --with-deps

       - name: Pytest
-        run: pipenv run pytest --cov=taipy --cov-append --cov-report="xml:overall-coverage.xml" --cov-report term-missing tests
+        run: pipenv run pytest -m "not orchestrator_dispatcher and not modin and not standalone" --cov=taipy --cov-append --cov-report="xml:overall-coverage.xml" --cov-report term-missing tests

       - name: Coverage
         if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'

+ 127 - 9
.github/workflows/partial-tests.yml

@@ -21,6 +21,7 @@ jobs:
           extra-pycodestyle-options: "--max-line-length=120  --exclude=tests/gui --ignore=E121,E123,E126,E226,E24,E704,W503,W504,E203"
           extra-mypy-options: "--ignore-missing-imports --implicit-optional --no-namespace-packages --exclude (taipy/templates/|generate_pyi.py|tools) --follow-imports skip"
           extra-isort-options: "--line-length=120 --force-grid-wrap=10 --multi-line=VERTICAL_HANGING_INDENT --trailing-comma"
+
   tests:
     needs: linter
     timeout-minutes: 40
@@ -81,32 +82,149 @@ jobs:

       - name: Pytest CLI
         if: steps.changes.outputs.cli == 'true'
-        run: pipenv run pytest  tests/cli
+        run: pipenv run pytest tests/cli

       - name: Pytest Config
         if: steps.changes.outputs.config == 'true'
-        run: pipenv run pytest  tests/config
+        run: pipenv run pytest tests/config

       - name: Pytest Core
         if: steps.changes.outputs.core == 'true'
-        run: pipenv run pytest  tests/core
+        run: pipenv run pytest -m "not orchestrator_dispatcher and not modin and not standalone" tests/core

       - name: Pytest GUI
         if: steps.changes.outputs.gui == 'true'
-        run: pipenv run pytest  tests/gui
+        run: pipenv run pytest tests/gui

       - name: Pytest GUI Core
         if: steps.changes.outputs.gui-core == 'true'
-        run: pipenv run pytest  tests/gui_core
+        run: pipenv run pytest tests/gui_core

       - name: Pytest Logger
         if: steps.changes.outputs.logger == 'true'
-        run: pipenv run pytest  tests/logger
+        run: pipenv run pytest tests/logger

       - name: Pytest Rest
         if: steps.changes.outputs.rest == 'true'
-        run: pipenv run pytest  tests/rest
+        run: pipenv run pytest tests/rest

-      - name: Pytest Rest
+      - name: Pytest Templates
         if: steps.changes.outputs.templates == 'true'
-        run: pipenv run pytest  tests/templates
+        run: pipenv run pytest tests/templates
+
+  submit_tests:
+    needs: linter
+    timeout-minutes: 20
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ['3.8', '3.9', '3.10', '3.11']
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: dorny/paths-filter@v2
+        id: changes
+        with:
+          filters: |
+            core:
+              - 'taipy/core/**'
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{matrix.python-version}}
+
+      - name: Install pipenv
+        if: steps.changes.outputs.core == 'true'
+        run: curl https://raw.githubusercontent.com/pypa/pipenv/master/get-pipenv.py | python
+
+      - name: Install Dependencies
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv install --dev --python=${{ matrix.python-version }}
+
+      - name: Setup LibMagic (MacOS)
+        if: matrix.os == 'macos-latest' && steps.changes.outputs.core == 'true'
+        run: brew install libmagic
+
+      - name: Pytest Core orchestrator_dispatcher
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv run pytest -m "orchestrator_dispatcher" tests/core
+
+  standalone_tests:
+    needs: linter
+    timeout-minutes: 20
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ['3.8', '3.9', '3.10', '3.11']
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: dorny/paths-filter@v2
+        id: changes
+        with:
+          filters: |
+            core:
+              - 'taipy/core/**'
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{matrix.python-version}}
+
+      - name: Install pipenv
+        if: steps.changes.outputs.core == 'true'
+        run: curl https://raw.githubusercontent.com/pypa/pipenv/master/get-pipenv.py | python
+
+      - name: Install Dependencies
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv install --dev --python=${{ matrix.python-version }}
+
+      - name: Setup LibMagic (MacOS)
+        if: matrix.os == 'macos-latest' && steps.changes.outputs.core == 'true'
+        run: brew install libmagic
+
+      - name: Pytest Core standalone
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv run pytest -m "standalone" tests/core
+
+  modin_tests:
+    needs: linter
+    timeout-minutes: 20
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ['3.8', '3.9', '3.10', '3.11']
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: dorny/paths-filter@v2
+        id: changes
+        with:
+          filters: |
+            core:
+              - 'taipy/core/**'
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{matrix.python-version}}
+
+      - name: Install pipenv
+        if: steps.changes.outputs.core == 'true'
+        run: curl https://raw.githubusercontent.com/pypa/pipenv/master/get-pipenv.py | python
+
+      - name: Install Dependencies
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv install --dev --python=${{ matrix.python-version }}
+
+      - name: Setup LibMagic (MacOS)
+        if: matrix.os == 'macos-latest' && steps.changes.outputs.core == 'true'
+        run: brew install libmagic
+
+      - name: Pytest Core modin
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv run pytest -m "modin" tests/core

+ 2 - 1
Pipfile

@@ -19,7 +19,7 @@ gitignore-parser = "==0.1.1"
 kthread = "==0.2.3"
 markdown = "==3.4.4"
 marshmallow = "==3.20.1"
-modin = {extras = ["dask"], version = "==0.23.0"}
+modin = {extras = ["dask"], version = "==0.23.1"}
 networkx = "==2.6"
 openpyxl = "==3.1.2"
 pandas = "==2.0.0"
@@ -41,6 +41,7 @@ autopep8 = "*"
 black = "*"
 flake8 = "*"
 flake8-docstrings = "*"
+freezegun = "*"
 ipython = "*"
 ipykernel = "*"
 isort = "*"
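
The new freezegun dev dependency backs the validity-period tests added below in tests/core/_orchestrator/_dispatcher/test_dispatcher__needs_to_run.py. As a minimal sketch of the pattern those tests rely on (the test itself is illustrative, assuming nothing beyond freezegun):

    from datetime import datetime, timedelta

    import freezegun

    def test_freeze_time_sketch():
        t0 = datetime(2023, 1, 1)
        with freezegun.freeze_time(t0):
            assert datetime.now() == t0  # "now" is pinned inside the block
        with freezegun.freeze_time(t0 + timedelta(days=1, seconds=1)):
            # a one-day validity period measured from t0 has just expired here
            assert datetime.now() - t0 > timedelta(days=1)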

+ 3 - 0
pytest.ini

@@ -9,3 +9,6 @@ filterwarnings =
     ignore::FutureWarning:pyarrow
 markers =
     teste2e:End-to-end tests
+    orchestrator_dispatcher:Orchestrator dispatcher tests
+    modin:Tests using modin
+    standalone:Tests starting a standalone dispatcher thread
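
Registering the three markers here avoids pytest's unknown-mark warning and is what lets the workflows above slice tests/core into separate CI jobs with the `-m` option. A minimal sketch of how a test opts into a group (the test below is illustrative, not part of this PR):

    import pytest

    @pytest.mark.standalone  # one of the markers registered in pytest.ini above
    def test_something_with_a_standalone_dispatcher():
        ...

    # selected by the standalone_tests job:  pytest -m "standalone" tests/core
    # skipped by the main jobs:              pytest -m "not orchestrator_dispatcher and not modin and not standalone" tests/core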

+ 1 - 1
taipy/core/Pipfile

@@ -4,7 +4,7 @@ verify_ssl = true
 name = "pypi"

 [packages]
-modin = {extras = ["dask"], version = "==0.23.0"}
+modin = {extras = ["dask"], version = "==0.23.1"}
 networkx = "==2.6"
 openpyxl = "==3.1.2"
 pyarrow = "==10.0.1"

+ 6 - 2
taipy/core/_orchestrator/_dispatcher/_job_dispatcher.py

@@ -11,6 +11,7 @@

 import threading
 from abc import abstractmethod
+from queue import Empty
 from typing import Dict, Optional

 from taipy.config.config import Config
@@ -31,7 +32,7 @@ class _JobDispatcher(threading.Thread):
     __logger = _TaipyLogger._get_logger()
     _nb_available_workers: int = 1

-    def __init__(self, orchestrator: Optional[_AbstractOrchestrator]):
+    def __init__(self, orchestrator: _AbstractOrchestrator):
         threading.Thread.__init__(self, name="Thread-Taipy-JobDispatcher")
         self.daemon = True
         self.orchestrator = orchestrator
@@ -58,7 +59,10 @@ class _JobDispatcher(threading.Thread):
                     with self.lock:
                         job = self.orchestrator.jobs_to_run.get(block=True, timeout=0.1)
                     self._execute_job(job)
-            except Exception:  # In case the last job of the queue has been removed.
+            except Empty:  # In case the last job of the queue has been removed.
+                pass
+            except Exception as e:
+                _TaipyLogger._get_logger().exception(e)
                 pass

     def _can_execute(self) -> bool:
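
The narrowed handler is the behavioral fix here: `Queue.get(block=True, timeout=0.1)` reports an exhausted queue by raising `queue.Empty`, so the old bare `except Exception` also swallowed genuine dispatcher errors. A self-contained sketch of the distinction:

    from queue import Empty, Queue

    q: Queue = Queue()
    try:
        job = q.get(block=True, timeout=0.1)  # raises Empty after 0.1s on an empty queue
    except Empty:
        pass  # expected and harmless: there is simply nothing to run yet
    except Exception as e:
        # anything else is a real bug and should be surfaced, as the dispatcher now logs it
        print(f"unexpected dispatcher error: {e!r}")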

+ 8 - 21
taipy/core/_orchestrator/_orchestrator.py

@@ -18,16 +18,14 @@ from typing import Callable, Iterable, List, Optional, Set, Union

 from taipy.config.config import Config
 from taipy.logger._taipy_logger import _TaipyLogger
-
+from ._abstract_orchestrator import _AbstractOrchestrator
 from .._entity.submittable import Submittable
 from ..data._data_manager_factory import _DataManagerFactory
 from ..job._job_manager_factory import _JobManagerFactory
 from ..job.job import Job
 from ..job.job_id import JobId
-from ..scenario.scenario import Scenario
 from ..submission._submission_manager_factory import _SubmissionManagerFactory
 from ..task.task import Task
-from ._abstract_orchestrator import _AbstractOrchestrator


 class _Orchestrator(_AbstractOrchestrator):
@@ -72,7 +70,6 @@ class _Orchestrator(_AbstractOrchestrator):
             submittable._ID_PREFIX,  # type: ignore
             getattr(submittable, "config_id", None),
         )
-
         jobs = []
         tasks = submittable._get_sorted_tasks()
         with cls.lock:
@@ -87,17 +84,13 @@
                             force=force,  # type: ignore
                         )
                     )
-
         submission.jobs = jobs  # type: ignore
-
         cls._orchestrate_job_to_run_or_block(jobs)
-
         if Config.job_config.is_development:
             cls._check_and_execute_jobs_if_development_mode()
         else:
             if wait:
-                cls.__wait_until_job_finished(jobs, timeout=timeout)
-
+                cls._wait_until_job_finished(jobs, timeout=timeout)
         return jobs

     @classmethod
@@ -113,7 +106,6 @@

         Parameters:
              task (Task^): The task to submit for execution.
-             submit_id (str): The optional id to differentiate each submission.
              callbacks: The optional list of functions that should be executed on job status change.
              force (bool): Enforce execution of the task even if its output data nodes are cached.
              wait (bool): Wait for the orchestrated job created from the task submission to be finished
@@ -133,18 +125,14 @@
                 itertools.chain([submission._update_submission_status], callbacks or []),
                 force,
             )
-
         jobs = [job]
         submission.jobs = jobs  # type: ignore
-
         cls._orchestrate_job_to_run_or_block(jobs)
-
         if Config.job_config.is_development:
             cls._check_and_execute_jobs_if_development_mode()
         else:
             if wait:
-                cls.__wait_until_job_finished(job, timeout=timeout)
-
+                cls._wait_until_job_finished(job, timeout=timeout)
         return job

     @classmethod
@@ -182,23 +170,22 @@
             cls.jobs_to_run.put(job)

     @classmethod
-    def __wait_until_job_finished(cls, jobs: Union[List[Job], Job], timeout: Optional[Union[float, int]] = None):
-        def __check_if_timeout(start, timeout):
-            if timeout:
-                return (datetime.now() - start).seconds < timeout
+    def _wait_until_job_finished(cls, jobs: Union[List[Job], Job], timeout: Optional[Union[float, int]] = None):
+        #  Note: this method should be prefixed by two underscores, but it has only one, so it can be mocked in tests.
+        def __check_if_timeout(st, to):
+            if to:
+                return (datetime.now() - st).seconds < to
             return True

         start = datetime.now()
         jobs = jobs if isinstance(jobs, Iterable) else [jobs]
         index = 0
-
         while __check_if_timeout(start, timeout) and index < len(jobs):
             try:
                 if jobs[index]._is_finished():
                     index = index + 1
                 else:
                     sleep(0.5)  # Limit CPU usage
-
             except Exception:
                 pass

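Renaming `__wait_until_job_finished` to `_wait_until_job_finished` matters for the new tests because a double leading underscore triggers Python name mangling, which makes the attribute awkward to patch. A small self-contained sketch of the difference, with illustrative names:

    from unittest import mock

    class Demo:
        def __hidden(self):  # name-mangled: stored on the class as _Demo__hidden
            return "real"

        def _mockable(self):  # single underscore: the attribute name is unchanged
            return "real"

    with mock.patch.object(Demo, "_mockable", return_value="fake"):
        assert Demo()._mockable() == "fake"

    # the mangled method can only be patched under its mangled name:
    with mock.patch.object(Demo, "_Demo__hidden", return_value="fake"):
        assert Demo()._Demo__hidden() == "fake"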

+ 1 - 1
taipy/core/setup.py

@@ -30,7 +30,7 @@ requirements = [
     "pyarrow>=10.0.1,<11.0",
     "networkx>=2.6,<3.0",
     "openpyxl>=3.1.2,<3.2",
-    "modin[dask]>=0.23.0,<1.0",
+    "modin[dask]>=0.23.1,<1.0",
     "pymongo[srv]>=4.2.0,<5.0",
     "sqlalchemy>=2.0.16,<2.1",
     "toml>=0.10,<0.11",

+ 67 - 0
tests/core/_orchestrator/_dispatcher/mock_standalone_dispatcher.py

@@ -0,0 +1,67 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+from concurrent.futures import Executor, Future
+from typing import Optional
+
+from taipy.core import Job
+from taipy.core._orchestrator._abstract_orchestrator import _AbstractOrchestrator
+from taipy.core._orchestrator._dispatcher import _StandaloneJobDispatcher
+
+
+class MockProcessPoolExecutor(Executor):
+    submit_called = []
+    f = []
+
+    def submit(self, fn, *args, **kwargs):
+        self.submit_called.append((fn, args, kwargs))
+        f = Future()
+        try:
+            result = fn(*args, **kwargs)
+        except BaseException as e:
+            f.set_exception(e)
+        else:
+            f.set_result(result)
+        self.f.append(f)
+        return f
+
+
+class MockStandaloneDispatcher(_StandaloneJobDispatcher):
+    def __init__(self, orchestrator: Optional[_AbstractOrchestrator]):
+        super(_StandaloneJobDispatcher, self).__init__(orchestrator)
+        self._executor = MockProcessPoolExecutor()
+        self.dispatch_calls = []
+        self.release_worker_calls = []
+        self.set_dispatch_processes_calls = []
+        self.pop_dispatch_processes_calls = []
+        self.update_job_status_from_future_calls = []
+        self.exceptions: dict = {}  # task_id -> exception to raise, used by mock_exception_for_job below
+
+    def mock_exception_for_job(self, task_id, e: Exception):
+        self.exceptions[task_id] = e
+
+    def _dispatch(self, job: Job):
+        self.dispatch_calls.append(job)
+        super()._dispatch(job)
+
+    def _set_dispatched_processes(self, job_id, future):
+        self.set_dispatch_processes_calls.append((job_id, future))
+        super()._set_dispatched_processes(job_id, future)
+
+    def _pop_dispatched_process(self, job_id, default=None):
+        self.pop_dispatch_processes_calls.append(job_id)
+        return super()._pop_dispatched_process(job_id, default)
+
+    def _release_worker(self, _):
+        self.release_worker_calls.append(None)
+        super()._release_worker(_)
+
+    def _update_job_status_from_future(self, job: Job, ft):
+        self.update_job_status_from_future_calls.append((job, ft))
+        super()._update_job_status_from_future(job, ft)

+ 63 - 0
tests/core/_orchestrator/_dispatcher/test_development_job_dispatcher.py

@@ -0,0 +1,63 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+import traceback
+from unittest.mock import patch
+
+from taipy.core import JobId
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job.job import Job
+from taipy.core.task._task_manager_factory import _TaskManagerFactory
+from taipy.core.task.task import Task
+
+
+def nothing(*args):
+    return
+
+
+def create_task():
+    task = Task("config_id", {}, nothing, [], [])
+    _TaskManagerFactory._build_manager()._set(task)
+    return task
+
+
+def test_dispatch_executes_the_function_no_exception():
+    task = create_task()
+    job = Job(JobId("job"), task, "s_id", task.id)
+    dispatcher = _OrchestratorFactory._build_dispatcher()
+
+    with patch("taipy.core._orchestrator._dispatcher._task_function_wrapper._TaskFunctionWrapper.execute") as mck:
+        mck.return_value = []
+        dispatcher._dispatch(job)
+
+        mck.assert_called_once()
+
+    assert job.is_completed()
+    assert job.stacktrace == []
+
+
+def test_dispatch_executes_the_function_with_exceptions():
+    task = create_task()
+    job = Job(JobId("job"), task, "s_id", task.id)
+    dispatcher = _OrchestratorFactory._build_dispatcher()
+    e_1 = Exception("test")
+    e_2 = Exception("test")
+
+    with patch("taipy.core._orchestrator._dispatcher._task_function_wrapper._TaskFunctionWrapper.execute") as mck:
+        mck.return_value = [e_1, e_2]
+        dispatcher._dispatch(job)
+
+        mck.assert_called_once()
+
+    assert len(job.stacktrace) == 2
+    assert job.stacktrace[1] == "".join(traceback.format_exception(type(e_2), value=e_2, tb=e_2.__traceback__))
+    assert job.stacktrace[0] == "".join(traceback.format_exception(type(e_1), value=e_1, tb=e_1.__traceback__))
+    assert job.is_failed()
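
The stacktrace assertions above compare against `traceback.format_exception`, which returns a list of strings that join into the familiar multi-line traceback text. A quick standalone illustration of the shape being compared:

    import traceback

    try:
        raise Exception("test")
    except Exception as e:
        text = "".join(traceback.format_exception(type(e), value=e, tb=e.__traceback__))

    assert text.startswith("Traceback (most recent call last):")
    assert text.rstrip().endswith("Exception: test")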

+ 117 - 0
tests/core/_orchestrator/_dispatcher/test_dispatcher__execute_job.py

@@ -0,0 +1,117 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+from unittest import mock
+
+import taipy
+from taipy.config.config import Config
+from taipy.core import JobId, TaskId
+from taipy.core._orchestrator._dispatcher import _JobDispatcher
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+from taipy.core.job.job import Job
+from taipy.core.task._task_manager_factory import _TaskManagerFactory
+from taipy.core.task.task import Task
+
+
+def nothing(*args):
+    return
+
+
+def create_scenario():
+    dn_cfg = Config.configure_pickle_data_node("dn")
+    t1_cfg = Config.configure_task("t1", nothing, [], [dn_cfg])
+    sc_conf = Config.configure_scenario("scenario_cfg", [t1_cfg])
+    return taipy.create_scenario(sc_conf)
+
+
+def test_can_execute():
+    dispatcher = _JobDispatcher(_OrchestratorFactory._orchestrator)
+    assert dispatcher._nb_available_workers == 1
+    assert dispatcher._can_execute()
+    dispatcher._nb_available_workers = 0
+    assert not dispatcher._can_execute()
+    dispatcher._nb_available_workers = -1
+    assert not dispatcher._can_execute()
+    dispatcher._nb_available_workers = 1
+    assert dispatcher._can_execute()
+
+
+def test_execute_job():
+    scenario = create_scenario()
+    scenario.t1.skippable = True  # make the job skippable
+    scenario.dn.lock_edit()  # lock output edit
+    job = Job(JobId("id"), scenario.t1, "submit_id", TaskId("id"))
+    _JobManagerFactory._build_manager()._set(job)
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._dispatch") as mck_1:
+        with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._needs_to_run") as mck_2:
+            mck_2.return_value = True
+            dispatcher = _JobDispatcher(_OrchestratorFactory._build_orchestrator())
+            dispatcher._execute_job(job)
+
+            mck_2.assert_called_once_with(job.task)  # This should be called to check if job needs to run
+            mck_1.assert_called_once_with(job)
+            assert job.is_running()  # The job is not executed since the dispatch is mocked
+            assert scenario.dn.edit_in_progress  # outputs must NOT have been unlocked because the dispatch is mocked
+
+
+def test_execute_job_to_skip():
+    scenario = create_scenario()
+    scenario.t1.skippable = True  # make the job skippable
+    scenario.dn.lock_edit()  # lock output edit
+    job = Job(JobId("id"), scenario.t1, "submit_id", TaskId("id"))
+    _JobManagerFactory._build_manager()._set(job)
+
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._dispatch") as mck_1:
+        with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._needs_to_run") as mck_2:
+            mck_2.return_value = False
+            _JobDispatcher(_OrchestratorFactory._build_orchestrator())._execute_job(job)
+
+            assert job.is_skipped()
+            mck_1.assert_not_called()  # The job is expected to be skipped, so it must not be dispatched
+            mck_2.assert_called_once_with(job.task)  # this must be called to check if the job needs to run
+            assert not scenario.dn.edit_in_progress  # outputs must have been unlocked
+
+
+def test_execute_job_skippable_with_force():
+    scenario = create_scenario()
+    scenario.t1.skippable = True  # make the job skippable
+    scenario.dn.lock_edit()  # lock output edit
+    job = Job(JobId("id"), scenario.t1, "submit_id", TaskId("id"), force=True)
+    _JobManagerFactory._build_manager()._set(job)
+
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._dispatch") as mck_1:
+        with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._needs_to_run") as mck_2:
+            mck_2.return_value = False
+            dispatcher = _JobDispatcher(_OrchestratorFactory._orchestrator)
+            dispatcher._execute_job(job)
+
+            mck_1.assert_called_once_with(job)  # This should be called to dispatch the job
+            mck_2.assert_not_called()  # This should NOT be called since we force the execution anyway
+            assert job.is_running()  # The job is not executed since the dispatch is mocked
+            assert scenario.dn.edit_in_progress  # outputs must NOT have been unlocked because the dispatch is mocked
+
+
+def test_execute_jobs_synchronously():
+    task = Task("config_id", {}, nothing, [], [])
+    _TaskManagerFactory._build_manager()._set(task)
+    job_1 = Job(JobId("job1"), task, "s_id", task.id)
+    job_2 = Job(JobId("job2"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._set(job_1)
+    _JobManagerFactory._build_manager()._set(job_2)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    orchestrator.jobs_to_run.put(job_1)
+    orchestrator.jobs_to_run.put(job_2)
+
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._execute_job") as mck:
+        _JobDispatcher(orchestrator)._execute_jobs_synchronously()
+        assert mck.call_count == 2
+        mck.assert_called_with(job_2)

+ 103 - 0
tests/core/_orchestrator/_dispatcher/test_dispatcher__needs_to_run.py

@@ -0,0 +1,103 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+from datetime import datetime, timedelta
+
+import freezegun
+
+from taipy.config import Config
+from taipy.core._orchestrator._dispatcher import _JobDispatcher
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.task._task_manager import _TaskManager
+
+
+def nothing(*args):
+    pass
+
+
+def _create_task_from_config(task_cfg):
+    return _TaskManager()._bulk_get_or_create([task_cfg])[0]
+
+
+def test_need_to_run_no_output():
+    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
+    world_cfg = Config.configure_data_node("world", default_data="world !")
+    task_cfg = Config.configure_task("name", input=[hello_cfg, world_cfg], function=nothing, output=[])
+    task = _create_task_from_config(task_cfg)
+    assert _JobDispatcher(_OrchestratorFactory._build_orchestrator())._needs_to_run(task)
+
+
+def test_need_to_run_task_not_skippable():
+    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
+    world_cfg = Config.configure_data_node("world", default_data="world !")
+    hello_world_cfg = Config.configure_data_node("hello_world")
+    task_cfg = Config.configure_task(
+        "name", input=[hello_cfg, world_cfg], function=nothing, output=[hello_world_cfg], skippable=False
+    )
+    task = _create_task_from_config(task_cfg)
+
+    assert _JobDispatcher(_OrchestratorFactory._build_orchestrator())._needs_to_run(task)
+
+
+def test_need_to_run_skippable_task_no_input():
+    hello_world_cfg = Config.configure_data_node("hello_world")
+    task_cfg = Config.configure_task("name", input=[], function=nothing, output=[hello_world_cfg], skippable=True)
+    task = _create_task_from_config(task_cfg)
+    dispatcher = _JobDispatcher(_OrchestratorFactory._build_orchestrator())
+    assert dispatcher._needs_to_run(task)  # output data is not written
+    task.output["hello_world"].write("Hello world !")
+    assert not dispatcher._needs_to_run(task)  # output data is written
+
+
+def test_need_to_run_skippable_task_no_validity_period_on_output():
+    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
+    output_cfg = Config.configure_data_node("output")
+    task_cfg = Config.configure_task("name", input=[hello_cfg], function=nothing, output=[output_cfg], skippable=True)
+    task = _create_task_from_config(task_cfg)
+    dispatcher = _JobDispatcher(_OrchestratorFactory._build_orchestrator())
+    assert dispatcher._needs_to_run(task)  # output data is not written
+    task.output["output"].write("Hello world !")
+    assert not dispatcher._needs_to_run(task)  # output data is written
+
+
+def test_need_to_run_skippable_task_with_validity_period_on_output():
+    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
+    hello_world_cfg = Config.configure_data_node("output", validity_period=timedelta(days=1))
+    task_cfg = Config.configure_task("name", nothing, [hello_cfg], [hello_world_cfg], skippable=True)
+    task = _create_task_from_config(task_cfg)
+    dispatcher = _JobDispatcher(_OrchestratorFactory._build_orchestrator())
+
+    assert dispatcher._needs_to_run(task)  # output data is not edited
+
+    output_edit_time = datetime.now()  # edit time
+    with freezegun.freeze_time(output_edit_time):
+        task.output["output"].write("Hello world !")  # output data is edited
+
+    with freezegun.freeze_time(output_edit_time + timedelta(minutes=30)):  # 30 min after edit time
+        assert not dispatcher._needs_to_run(task)  # output data is written and validity period not expired
+
+    with freezegun.freeze_time(output_edit_time + timedelta(days=1, seconds=1)):  # 1 day and 1 second after edit time
+        assert dispatcher._needs_to_run(task)  # output data is written but validity period expired
+
+
+def test_need_to_run_skippable_task_but_input_edited_after_output():
+    hello_cfg = Config.configure_data_node("input", default_data="Hello ")
+    hello_world_cfg = Config.configure_data_node("output")
+    task_cfg = Config.configure_task("name", nothing, [hello_cfg], [hello_world_cfg], skippable=True)
+    task = _create_task_from_config(task_cfg)
+    dispatcher = _JobDispatcher(_OrchestratorFactory._build_orchestrator())
+    output_edit_time = datetime.now()
+    with freezegun.freeze_time(output_edit_time):
+        task.data_nodes["output"].write("Hello world !")  # output data is edited at output_edit_time
+
+    with freezegun.freeze_time(output_edit_time + timedelta(minutes=30)):  # 30 min after output_edit_time
+        task.data_nodes["input"].write("Yellow !")
+        assert dispatcher._needs_to_run(task)  # the input was edited after the output, so the task needs to run

+ 61 - 0
tests/core/_orchestrator/_dispatcher/test_dispatcher__update_job_status.py

@@ -0,0 +1,61 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+import traceback
+
+from taipy import Job, Task, Status, JobId
+from taipy.core._orchestrator._dispatcher import _JobDispatcher
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+from taipy.core.task._task_manager_factory import _TaskManagerFactory
+
+
+def nothing(*args):
+    pass
+
+
+def test_update_job_status_no_exception():
+    task = Task("config_id", {}, nothing)
+    _TaskManagerFactory._build_manager()._set(task)
+    job = Job(JobId("id"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._set(job)
+
+    _JobDispatcher(_OrchestratorFactory._orchestrator)._update_job_status(job, None)
+
+    assert job.status == Status.COMPLETED
+    assert job.stacktrace == []
+
+
+def test_update_job_status_with_one_exception():
+    task = Task("config_id", {}, nothing)
+    _TaskManagerFactory._build_manager()._set(task)
+    job = Job(JobId("id"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._set(job)
+    e = Exception("test")
+    _JobDispatcher(_OrchestratorFactory._orchestrator)._update_job_status(job, [e])
+
+    assert job.status == Status.FAILED
+    assert len(job.stacktrace) == 1
+    assert job.stacktrace[0] == "".join(traceback.format_exception(type(e), value=e, tb=e.__traceback__))
+
+
+def test_update_job_status_with_exceptions():
+    task = Task("config_id", {}, nothing)
+    _TaskManagerFactory._build_manager()._set(task)
+    job = Job(JobId("id"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._set(job)
+    e_1 = Exception("test1")
+    e_2 = Exception("test2")
+    _JobDispatcher(_OrchestratorFactory._orchestrator)._update_job_status(job, [e_1, e_2])
+
+    assert job.status == Status.FAILED
+    assert len(job.stacktrace) == 2
+    assert job.stacktrace[0] == "".join(traceback.format_exception(type(e_1), value=e_1, tb=e_1.__traceback__))
+    assert job.stacktrace[1] == "".join(traceback.format_exception(type(e_2), value=e_2, tb=e_2.__traceback__))

+ 0 - 163
tests/core/_orchestrator/_dispatcher/test_job_dispatcher.py

@@ -1,163 +0,0 @@
-# Copyright 2023 Avaiga Private Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations under the License.
-
-import multiprocessing
-from concurrent.futures import ProcessPoolExecutor
-from functools import partial
-from unittest import mock
-from unittest.mock import MagicMock
-
-from pytest import raises
-
-from taipy.config.config import Config
-from taipy.core import DataNodeId, JobId, TaskId
-from taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher
-from taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher
-from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
-from taipy.core.config.job_config import JobConfig
-from taipy.core.data._data_manager import _DataManager
-from taipy.core.job.job import Job
-from taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
-from taipy.core.task.task import Task
-from tests.core.utils import assert_true_after_time
-
-
-def execute(lock):
-    with lock:
-        ...
-    return None
-
-
-def _error():
-    raise RuntimeError("Something bad has happened")
-
-
-def test_build_development_job_dispatcher():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-    dispatcher = _OrchestratorFactory._dispatcher
-
-    assert isinstance(dispatcher, _DevelopmentJobDispatcher)
-    assert dispatcher._nb_available_workers == 1
-
-    with raises(NotImplementedError):
-        assert dispatcher.start()
-
-    assert dispatcher.is_running()
-
-    with raises(NotImplementedError):
-        dispatcher.stop()
-
-
-def test_build_standalone_job_dispatcher():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-    dispatcher = _OrchestratorFactory._dispatcher
-
-    assert not isinstance(dispatcher, _DevelopmentJobDispatcher)
-    assert isinstance(dispatcher, _StandaloneJobDispatcher)
-    assert isinstance(dispatcher._executor, ProcessPoolExecutor)
-    assert dispatcher._nb_available_workers == 2
-    assert_true_after_time(dispatcher.is_running)
-    dispatcher.stop()
-    dispatcher.join()
-    assert_true_after_time(lambda: not dispatcher.is_running())
-
-
-def test_can_execute_2_workers():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    m = multiprocessing.Manager()
-    lock = m.Lock()
-
-    task_id = TaskId("task_id1")
-    output = list(_DataManager._bulk_get_or_create([Config.configure_data_node("input1", default_data=21)]).values())
-
-    _OrchestratorFactory._build_dispatcher()
-
-    task = Task(
-        config_id="name",
-        properties={},
-        input=[],
-        function=partial(execute, lock),
-        output=output,
-        id=task_id,
-    )
-    job_id = JobId("id1")
-    job = Job(job_id, task, "submit_id", task.id)
-
-    dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator)
-
-    with lock:
-        assert dispatcher._can_execute()
-        dispatcher._dispatch(job)
-        assert dispatcher._can_execute()
-        dispatcher._dispatch(job)
-        assert not dispatcher._can_execute()
-
-    assert_true_after_time(lambda: dispatcher._can_execute())
-
-
-def test_can_execute_synchronous():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-
-    task_id = TaskId("task_id1")
-    task = Task(config_id="name", properties={}, input=[], function=print, output=[], id=task_id)
-    submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX, task.config_id)
-    job_id = JobId("id1")
-    job = Job(job_id, task, submission.id, task.id)
-
-    dispatcher = _OrchestratorFactory._dispatcher
-
-    assert dispatcher._can_execute()
-    dispatcher._dispatch(job)
-    assert dispatcher._can_execute()
-
-
-def test_exception_in_user_function():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-
-    task_id = TaskId("task_id1")
-    job_id = JobId("id1")
-    task = Task(config_id="name", properties={}, input=[], function=_error, output=[], id=task_id)
-    submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX, task.config_id)
-    job = Job(job_id, task, submission.id, task.id)
-
-    dispatcher = _OrchestratorFactory._dispatcher
-    dispatcher._dispatch(job)
-    assert job.is_failed()
-    assert 'RuntimeError("Something bad has happened")' in str(job.stacktrace[0])
-
-
-def test_exception_in_writing_data():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-
-    task_id = TaskId("task_id1")
-    job_id = JobId("id1")
-    output = MagicMock()
-    output.id = DataNodeId("output_id")
-    output._config_id = "my_raising_datanode"
-    output._is_in_cache = False
-    output.write.side_effect = ValueError()
-    task = Task(config_id="name", properties={}, input=[], function=print, output=[output], id=task_id)
-    submission = _SubmissionManagerFactory._build_manager()._create(task_id, task._ID_PREFIX, task.config_id)
-    job = Job(job_id, task, submission.id, task.id)
-
-    dispatcher = _OrchestratorFactory._dispatcher
-
-    with mock.patch("taipy.core.data._data_manager._DataManager._get") as get:
-        get.return_value = output
-        dispatcher._dispatch(job)
-        assert job.is_failed()
-        assert "node" in job.stacktrace[0]

+ 134 - 0
tests/core/_orchestrator/_dispatcher/test_standalone_job_dispatcher.py

@@ -0,0 +1,134 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+import time
+from concurrent.futures import ProcessPoolExecutor, Future
+from unittest import mock
+from unittest.mock import call
+
+from taipy import Config
+from taipy.config._serializer._toml_serializer import _TomlSerializer
+from taipy.core import JobId
+from taipy.core._orchestrator._dispatcher import _StandaloneJobDispatcher
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+from taipy.core.job.job import Job
+from taipy.core.task._task_manager_factory import _TaskManagerFactory
+from taipy.core.task.task import Task
+from tests.core._orchestrator._dispatcher.mock_standalone_dispatcher import MockStandaloneDispatcher
+from tests.core.utils import assert_true_after_time
+
+
+def nothing(*args):
+    return
+
+
+def create_task():
+    task = Task("config_id", {}, nothing, [], [])
+    _TaskManagerFactory._build_manager()._set(task)
+    return task
+
+
+def test_init_default():
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job_dispatcher = _StandaloneJobDispatcher(orchestrator)
+
+    assert job_dispatcher.orchestrator == orchestrator
+    assert job_dispatcher.lock == orchestrator.lock
+    assert job_dispatcher._nb_available_workers == 1
+    assert isinstance(job_dispatcher._executor, ProcessPoolExecutor)
+
+
+def test_init_with_nb_workers():
+    Config.configure_job_executions(max_nb_of_workers=2)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job_dispatcher = _StandaloneJobDispatcher(orchestrator)
+
+    assert job_dispatcher._nb_available_workers == 2
+
+
+def test_dispatch_job():
+    task = create_task()
+    job = Job(JobId("job"), task, "s_id", task.id)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    dispatcher = MockStandaloneDispatcher(orchestrator)
+
+    dispatcher._dispatch(job)
+
+    # test that the job execution is submitted to the executor
+    assert len(dispatcher.dispatch_calls) == 1
+    assert len(dispatcher._executor.submit_called) == 1
+    submit_first_call = dispatcher._executor.submit_called[0]
+    assert submit_first_call[0].job_id == job.id
+    assert submit_first_call[0].task == task
+    assert submit_first_call[1] == ()
+    assert submit_first_call[2]["config_as_string"] == _TomlSerializer()._serialize(Config._applied_config)
+
+    # test that the proc of the job is added to the list of dispatched jobs
+    assert len(dispatcher.set_dispatch_processes_calls) == 1
+    assert dispatcher.set_dispatch_processes_calls[0][0] == job.id
+    assert dispatcher.set_dispatch_processes_calls[0][1] == dispatcher._executor.f[0]
+
+    # test that the worker is released after the job is done
+    assert len(dispatcher.release_worker_calls) == 1
+
+    # test that the job status is updated after execution on future
+    assert len(dispatcher.update_job_status_from_future_calls) == 1
+    assert dispatcher.update_job_status_from_future_calls[0][0] == job
+    assert dispatcher.update_job_status_from_future_calls[0][1] == dispatcher._executor.f[0]
+
+
+def test_release_worker():
+    dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator)
+
+    assert dispatcher._nb_available_workers == 1
+    dispatcher._release_worker(None)
+    assert dispatcher._nb_available_workers == 2
+    dispatcher._release_worker(None)
+    assert dispatcher._nb_available_workers == 3
+
+
+def test_update_job_status_from_future():
+    task = create_task()
+    job = Job(JobId("job"), task, "s_id", task.id)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    dispatcher = _StandaloneJobDispatcher(orchestrator)
+    ft = Future()
+    ft.set_result(None)
+    dispatcher._set_dispatched_processes(job.id, ft)  # the job is dispatched to a process
+
+    dispatcher._update_job_status_from_future(job, ft)
+
+    assert len(dispatcher._dispatched_processes) == 0  # the job process is not stored anymore
+    assert job.is_completed()
+
+
+def test_run():
+    task = create_task()
+    job_1 = Job(JobId("job1"), task, "s_id", task.id)
+    job_2 = Job(JobId("job2"), task, "s_id", task.id)
+    job_3 = Job(JobId("job3"), task, "s_id", task.id)
+    job_4 = Job(JobId("job4"), task, "s_id", task.id)
+    _JobManagerFactory._build_manager()._set(job_1)
+    _JobManagerFactory._build_manager()._set(job_2)
+    _JobManagerFactory._build_manager()._set(job_3)
+    _JobManagerFactory._build_manager()._set(job_4)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    orchestrator.jobs_to_run.put(job_1)
+    orchestrator.jobs_to_run.put(job_2)
+    orchestrator.jobs_to_run.put(job_3)
+    orchestrator.jobs_to_run.put(job_4)
+
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher._execute_job") as mck:
+        dispatcher = _StandaloneJobDispatcher(orchestrator)
+        dispatcher.start()
+        assert_true_after_time(lambda: mck.call_count == 4, msg="The 4 jobs were not dequeued.", time=5)
+        dispatcher.stop()
+        mck.assert_has_calls([call(job_1), call(job_2), call(job_3), call(job_4)])

+ 133 - 0
tests/core/_orchestrator/_dispatcher/test_task_function_wrapper.py

@@ -0,0 +1,133 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+import random
+import string
+
+from taipy.config import Config
+from taipy.config._serializer._toml_serializer import _TomlSerializer
+from taipy.config.common.scope import Scope
+from taipy.config.exceptions import ConfigurationUpdateBlocked
+from taipy.core._orchestrator._dispatcher._task_function_wrapper import _TaskFunctionWrapper
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.data._data_manager import _DataManager
+from taipy.core.task.task import Task
+
+
+def _create_task(function, nb_outputs=1):
+    output_dn_config_id = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
+    dn_input_configs = [
+        Config.configure_data_node("input1", "pickle", Scope.SCENARIO, default_data=21),
+        Config.configure_data_node("input2", "pickle", Scope.SCENARIO, default_data=2),
+    ]
+    dn_output_configs = [
+        Config.configure_data_node(f"{output_dn_config_id}_output{i}", "pickle", Scope.SCENARIO, default_data=0)
+        for i in range(nb_outputs)
+    ]
+    input_dn = _DataManager._bulk_get_or_create(dn_input_configs).values()
+    output_dn = _DataManager._bulk_get_or_create(dn_output_configs).values()
+    return Task(
+        output_dn_config_id,
+        {},
+        function=function,
+        input=input_dn,
+        output=output_dn,
+    )
+
+
+def multiply(nb1: float, nb2: float):
+    return nb1 * nb2
+
+
+def test_execute_task_that_return_multiple_outputs():
+    def return_2tuple(nb1, nb2):
+        return multiply(nb1, nb2), multiply(nb1, nb2) / 2
+
+    def return_list(nb1, nb2):
+        return [multiply(nb1, nb2), multiply(nb1, nb2) / 2]
+
+    with_tuple = _create_task(return_2tuple, 2)
+    with_list = _create_task(return_list, 2)
+    _TaskFunctionWrapper("job_id_tuple", with_tuple).execute()
+    _TaskFunctionWrapper("job_id_list", with_list).execute()
+
+    assert (
+        with_tuple.output[f"{with_tuple.config_id}_output0"].read()
+        == with_list.output[f"{with_list.config_id}_output0"].read()
+        == 42
+    )
+    assert (
+        with_tuple.output[f"{with_tuple.config_id}_output1"].read()
+        == with_list.output[f"{with_list.config_id}_output1"].read()
+        == 21
+    )
+
+
+def test_execute_task_that_returns_single_iterable_output():
+    def return_2tuple(nb1, nb2):
+        return multiply(nb1, nb2), multiply(nb1, nb2) / 2
+
+    def return_list(nb1, nb2):
+        return [multiply(nb1, nb2), multiply(nb1, nb2) / 2]
+
+    task_with_tuple = _create_task(return_2tuple, 1)
+    task_with_list = _create_task(return_list, 1)
+    _TaskFunctionWrapper("job_id_tuple", task_with_tuple).execute()
+    _TaskFunctionWrapper("job_id_list", task_with_list).execute()
+
+    assert task_with_tuple.output[f"{task_with_tuple.config_id}_output0"].read() == (42, 21)
+    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
+    assert task_with_list.output[f"{task_with_list.config_id}_output0"].read() == [42, 21]
+    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
+
+
+def test_data_node_not_written_due_to_wrong_result_nb():
+    def fct_2_outputs():
+        return lambda nb1, nb2: (multiply(nb1, nb2), multiply(nb1, nb2) / 2)
+
+    task_expecting_3_outputs = _create_task(fct_2_outputs, 3)
+
+    exceptions = _TaskFunctionWrapper("job_id", task_expecting_3_outputs).execute()
+
+    assert len(exceptions) == 1
+    assert isinstance(exceptions[0], Exception)
+
+
+def test_cannot_exec_task_that_update_config():
+    def update_config_fct(n, m):
+        from taipy.config import Config
+
+        Config.core.storage_folder = ".new_storage_folder/"
+        return n * m
+
+    task_updating_cfg = _create_task(update_config_fct)
+    cfg_as_str = _TomlSerializer()._serialize(Config._applied_config)
+    res = _TaskFunctionWrapper("job_id", task_updating_cfg).execute(config_as_string=cfg_as_str)
+
+    assert len(res) == 1
+    assert isinstance(res[0], ConfigurationUpdateBlocked)
+
+
+def test_can_execute_task_with_a_modified_config():
+    def assert_config_is_correct_after_serialization(n, m):
+        from taipy.config import Config
+
+        assert Config.core.storage_folder == ".my_data/"
+        assert Config.core.custom_property == "custom_property"
+        return n * m
+
+    Config.configure_core(storage_folder=".my_data/", custom_property="custom_property")
+
+    task_asserting_cfg_is_correct = _create_task(assert_config_is_correct_after_serialization)
+    cfg_as_str = _TomlSerializer()._serialize(Config._applied_config)
+    res = _TaskFunctionWrapper("job_id", task_asserting_cfg_is_correct).execute(config_as_string=cfg_as_str)
+
+    assert len(res) == 0  # no exception was raised, so the asserts in the function passed
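
Taken together, the tests above pin down the _TaskFunctionWrapper contract: execute() calls the task function on the task's input data nodes in-process, writes the results to the output data nodes, and returns a list of caught exceptions instead of raising. A minimal sketch of that calling pattern, reusing the multiply and _create_task helpers from this file (the expected 42 matches the default input data used by the tests above):

    task = _create_task(multiply)                                 # two inputs, one output
    exceptions = _TaskFunctionWrapper("job_id", task).execute()   # runs in-process
    assert exceptions == []                                       # an empty list means success
    assert task.output[f"{task.config_id}_output0"].read() == 42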

+ 9 - 886
tests/core/_orchestrator/test_orchestrator.py

@@ -12,30 +12,22 @@
 import multiprocessing
 import random
 import string
-from concurrent.futures import ProcessPoolExecutor
-from datetime import datetime, timedelta
 from functools import partial
 from time import sleep
 
 import pytest
-from tests.core.utils import assert_true_after_time
 
 from taipy.config import Config
 from taipy.config.common.scope import Scope
-from taipy.config.exceptions.exceptions import ConfigurationUpdateBlocked
-from taipy.core import taipy
 from taipy.core._orchestrator._orchestrator import _Orchestrator
 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
 from taipy.core.config.job_config import JobConfig
 from taipy.core.data._data_manager import _DataManager
-from taipy.core.scenario._scenario_manager import _ScenarioManager
 from taipy.core.scenario.scenario import Scenario
-from taipy.core.sequence.sequence import Sequence
 from taipy.core.submission._submission_manager import _SubmissionManager
 from taipy.core.submission.submission_status import SubmissionStatus
-from taipy.core.task._task_manager import _TaskManager
 from taipy.core.task.task import Task
-from taipy.core.data.pickle import PickleDataNode
+from tests.core.utils import assert_true_after_time


 # ################################  USER FUNCTIONS  ##################################
@@ -55,563 +47,7 @@ def mult_by_2(n):
     return n * 2


-def nothing():
-    return True
-
-
-def concat(a, b):
-    return a + b
-
-
-def _error():
-    raise Exception
-
-
-# ################################  TEST METHODS    ##################################
-
-
-def test_submit_task():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    before_creation = datetime.now()
-    sleep(0.1)
-    task = _create_task(multiply)
-    output_dn_id = task.output[f"{task.config_id}_output0"].id
-
-    _OrchestratorFactory._build_dispatcher()
-
-    assert _DataManager._get(output_dn_id).last_edit_date > before_creation
-    assert _DataManager._get(output_dn_id).job_ids == []
-    assert _DataManager._get(output_dn_id).is_ready_for_reading
-
-    before_submission_creation = datetime.now()
-    sleep(0.1)
-    job = _Orchestrator.submit_task(task)
-    sleep(0.1)
-    after_submission_creation = datetime.now()
-    assert _DataManager._get(output_dn_id).read() == 42
-    assert _DataManager._get(output_dn_id).last_edit_date > before_submission_creation
-    assert _DataManager._get(output_dn_id).last_edit_date < after_submission_creation
-    assert _DataManager._get(output_dn_id).job_ids == [job.id]
-    assert _DataManager._get(output_dn_id).is_ready_for_reading
-    assert job.is_completed()
-    assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-
-
-def test_submit_sequence_generate_unique_submit_id():
-    dn_1 = PickleDataNode("dn_config_id_1", Scope.SCENARIO)
-    dn_2 = PickleDataNode("dn_config_id_2", Scope.SCENARIO)
-    task_1 = Task("task_config_id_1", {}, print, [dn_1])
-    task_2 = Task("task_config_id_2", {}, print, [dn_1], [dn_2])
-
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-
-    scenario = Scenario("scenario", [task_1, task_2], {}, sequences={"sequence": {"tasks": [task_1, task_2]}})
-    _ScenarioManager._set(scenario)
-
-    sequence = scenario.sequences["sequence"]
-
-    jobs_1 = taipy.submit(sequence)
-    jobs_2 = taipy.submit(sequence)
-    assert len(jobs_1) == 2
-    assert len(jobs_2) == 2
-    submit_ids_1 = [job.submit_id for job in jobs_1]
-    submit_ids_2 = [job.submit_id for job in jobs_2]
-    assert len(set(submit_ids_1)) == 1
-    assert len(set(submit_ids_2)) == 1
-    assert set(submit_ids_1) != set(submit_ids_2)
-
-
-def test_submit_scenario_generate_unique_submit_id():
-    dn_1 = PickleDataNode("dn_config_id_1", Scope.SCENARIO)
-    dn_2 = PickleDataNode("dn_config_id_2", Scope.SCENARIO)
-    dn_3 = PickleDataNode("dn_config_id_3", Scope.SCENARIO)
-    task_1 = Task("task_config_id_1", {}, print, [dn_1])
-    task_2 = Task("task_config_id_2", {}, print, [dn_2])
-    task_3 = Task("task_config_id_3", {}, print, [dn_3])
-    scenario = Scenario("scenario_config_id", [task_1, task_2, task_3], {})
-
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-    _TaskManager._set(task_3)
-    _ScenarioManager._set(scenario)
-
-    jobs_1 = taipy.submit(scenario)
-    jobs_2 = taipy.submit(scenario)
-
-    assert len(jobs_1) == 3
-    assert len(jobs_2) == 3
-
-
-def test_submit_entity_store_entity_id_in_job():
-    dn_1 = PickleDataNode("dn_config_id_1", Scope.SCENARIO)
-    dn_2 = PickleDataNode("dn_config_id_2", Scope.SCENARIO)
-    dn_3 = PickleDataNode("dn_config_id_3", Scope.SCENARIO)
-    task_1 = Task("task_config_id_1", {}, print, [dn_1])
-    task_2 = Task("task_config_id_2", {}, print, [dn_2])
-    task_3 = Task("task_config_id_3", {}, print, [dn_3])
-    scenario = Scenario("scenario_config_id", [task_1, task_2, task_3], {})
-
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-    _TaskManager._set(task_3)
-    _ScenarioManager._set(scenario)
-
-    jobs_1 = taipy.submit(scenario)
-    assert all(job.submit_entity_id == scenario.id for job in jobs_1)
-
-    job_1 = taipy.submit(task_1)
-    assert job_1.submit_entity_id == task_1.id
-
-
-def test_submit_task_that_return_multiple_outputs():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    def return_2tuple(nb1, nb2):
-        return multiply(nb1, nb2), multiply(nb1, nb2) / 2
-
-    def return_list(nb1, nb2):
-        return [multiply(nb1, nb2), multiply(nb1, nb2) / 2]
-
-    with_tuple = _create_task(return_2tuple, 2)
-    with_list = _create_task(return_list, 2)
-
-    _OrchestratorFactory._build_dispatcher()
-
-    _Orchestrator.submit_task(with_tuple)
-    _Orchestrator.submit_task(with_list)
-
-    assert (
-        with_tuple.output[f"{with_tuple.config_id}_output0"].read()
-        == with_list.output[f"{with_list.config_id}_output0"].read()
-        == 42
-    )
-    assert (
-        with_tuple.output[f"{with_tuple.config_id}_output1"].read()
-        == with_list.output[f"{with_list.config_id}_output1"].read()
-        == 21
-    )
-
-
-def test_submit_task_returns_single_iterable_output():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    def return_2tuple(nb1, nb2):
-        return multiply(nb1, nb2), multiply(nb1, nb2) / 2
-
-    def return_list(nb1, nb2):
-        return [multiply(nb1, nb2), multiply(nb1, nb2) / 2]
-
-    task_with_tuple = _create_task(return_2tuple, 1)
-    task_with_list = _create_task(return_list, 1)
-
-    _OrchestratorFactory._build_dispatcher()
-
-    _Orchestrator.submit_task(task_with_tuple)
-    assert task_with_tuple.output[f"{task_with_tuple.config_id}_output0"].read() == (42, 21)
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
-    _Orchestrator.submit_task(task_with_list)
-    assert task_with_list.output[f"{task_with_list.config_id}_output0"].read() == [42, 21]
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
-
-
-def test_data_node_not_written_due_to_wrong_result_nb():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    def return_2tuple():
-        return lambda nb1, nb2: (multiply(nb1, nb2), multiply(nb1, nb2) / 2)
-
-    task = _create_task(return_2tuple(), 3)
-
-    _OrchestratorFactory._build_dispatcher()
-
-    job = _Orchestrator.submit_task(task)
-    assert task.output[f"{task.config_id}_output0"].read() == 0
-    assert job.is_failed()
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
-    assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED
-
-
-def test_scenario_only_submit_same_task_once():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-
-    dn_0 = PickleDataNode("dn_config_0", Scope.SCENARIO, properties={"default_data": 0})
-    dn_1 = PickleDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dn_2 = PickleDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    task_1 = Task("task_config_1", {}, print, input=[dn_0], output=[dn_1], id="task_1")
-    task_2 = Task("task_config_2", {}, print, input=[dn_1], id="task_2")
-    task_3 = Task("task_config_3", {}, print, input=[dn_1], output=[dn_2], id="task_3")
-    scenario_1 = Scenario(
-        "scenario_config_1",
-        [task_1, task_2, task_3],
-        {},
-        "scenario_1",
-        sequences={"sequence_1": {"tasks": [task_1, task_2]}, "sequence_2": {"tasks": [task_1, task_3]}},
-    )
-    sequence_1 = scenario_1.sequences["sequence_1"]
-    sequence_2 = scenario_1.sequences["sequence_2"]
-
-    jobs = _Orchestrator.submit(scenario_1)
-    assert len(jobs) == 3
-    assert all([job.is_completed() for job in jobs])
-    assert all(not _Orchestrator._is_blocked(job) for job in jobs)
-    assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED
-
-    jobs = _Orchestrator.submit(sequence_1)
-    assert len(jobs) == 2
-    assert all([job.is_completed() for job in jobs])
-    assert all(not _Orchestrator._is_blocked(job) for job in jobs)
-    assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED
-
-    jobs = _Orchestrator.submit(sequence_2)
-    assert len(jobs) == 2
-    assert all([job.is_completed() for job in jobs])
-    assert all(not _Orchestrator._is_blocked(job) for job in jobs)
-    assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED
-
-
-def test_update_status_fail_job():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-
-    dn_0 = PickleDataNode("dn_config_0", Scope.SCENARIO, properties={"default_data": 0})
-    dn_1 = PickleDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dn_2 = PickleDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    task_0 = Task("task_config_0", {}, _error, output=[dn_0], id="task_0")
-    task_1 = Task("task_config_1", {}, print, input=[dn_0], output=[dn_1], id="task_1")
-    task_2 = Task("task_config_2", {}, print, input=[dn_1], id="task_2")
-    task_3 = Task("task_config_3", {}, print, input=[dn_2], id="task_3")
-    scenario_1 = Scenario("scenario_config_1", [task_0, task_1, task_2, task_3], {}, "scenario_1")
-    scenario_2 = Scenario("scenario_config_2", [task_0, task_1, task_2, task_3], {}, "scenario_2")
-
-    _DataManager._set(dn_0)
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_0)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-    _TaskManager._set(task_3)
-    _ScenarioManager._set(scenario_1)
-    _ScenarioManager._set(scenario_2)
-
-    job = _Orchestrator.submit_task(task_0)
-    assert job.is_failed()
-    assert _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED
-
-    jobs = _Orchestrator.submit(scenario_1)
-    tasks_jobs = {job._task.id: job for job in jobs}
-    assert tasks_jobs["task_0"].is_failed()
-    assert all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]])
-    assert tasks_jobs["task_3"].is_completed()
-    assert all(not _Orchestrator._is_blocked(job) for job in jobs)
-    assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED
-
-    jobs = _Orchestrator.submit(scenario_2)
-    tasks_jobs = {job._task.id: job for job in jobs}
-    assert tasks_jobs["task_0"].is_failed()
-    assert all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]])
-    assert tasks_jobs["task_3"].is_completed()
-    assert all(not _Orchestrator._is_blocked(job) for job in jobs)
-    assert _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED
-
-
-def test_update_status_fail_job_in_parallel_one_job():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    dn = PickleDataNode("dn_config_0", Scope.SCENARIO, properties={"default_data": 0})
-    task = Task("task_config_0", {}, _error, output=[dn], id="task_0")
-    _DataManager._set(dn)
-    _TaskManager._set(task)
-    job = _Orchestrator.submit_task(task)
-    assert_true_after_time(job.is_failed)
-    assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED)
-
-
-def test_update_status_fail_job_in_parallel_one_sequence():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    dn_0 = PickleDataNode("dn_config_0", Scope.SCENARIO, properties={"default_data": 0})
-    dn_1 = PickleDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dn_2 = PickleDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    task_0 = Task("task_config_0", {}, _error, output=[dn_0], id="task_0")
-    task_1 = Task("task_config_1", {}, print, input=[dn_0], output=[dn_1], id="task_1")
-    task_2 = Task("task_config_2", {}, print, input=[dn_1], id="task_2")
-    task_3 = Task("task_config_3", {}, print, input=[dn_2], id="task_3")
-    sc = Scenario(
-        "scenario_config_1",
-        set([task_0, task_1, task_2, task_3]),
-        {},
-        set(),
-        "scenario_1",
-        sequences={"sequence_1": {"tasks": [task_0, task_1, task_2]}},
-    )
-    _DataManager._set(dn_0)
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_0)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-    _TaskManager._set(task_3)
-    _ScenarioManager._set(sc)
-
-    jobs = _Orchestrator.submit(sc.sequences["sequence_1"])
-
-    tasks_jobs = {job._task.id: job for job in jobs}
-    assert_true_after_time(tasks_jobs["task_0"].is_failed)
-    assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]]))
-    assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs))
-    submit_id = jobs[0].submit_id
-    submission = _SubmissionManager._get(submit_id)
-    assert_true_after_time(lambda: submission.submission_status == SubmissionStatus.FAILED)
-
-
-def test_update_status_fail_job_in_parallel_one_scenario():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    dn_0 = PickleDataNode("dn_config_0", Scope.SCENARIO, properties={"default_data": 0})
-    dn_1 = PickleDataNode("dn_config_1", Scope.SCENARIO, properties={"default_data": 1})
-    dn_2 = PickleDataNode("dn_config_2", Scope.SCENARIO, properties={"default_data": 2})
-    task_0 = Task("task_config_0", {}, _error, output=[dn_0], id="task_0")
-    task_1 = Task("task_config_1", {}, print, input=[dn_0], output=[dn_1], id="task_1")
-    task_2 = Task("task_config_2", {}, print, input=[dn_1], id="task_2")
-    task_3 = Task("task_config_3", {}, print, input=[dn_2], id="task_3")
-    sc = Scenario("scenario_config_1", set([task_0, task_1, task_2, task_3]), {}, set(), "scenario_1")
-
-    _DataManager._set(dn_0)
-    _DataManager._set(dn_1)
-    _DataManager._set(dn_2)
-    _TaskManager._set(task_0)
-    _TaskManager._set(task_1)
-    _TaskManager._set(task_2)
-    _TaskManager._set(task_3)
-    _ScenarioManager._set(sc)
-
-    jobs = _Orchestrator.submit(sc)
-
-    tasks_jobs = {job._task.id: job for job in jobs}
-    assert_true_after_time(tasks_jobs["task_0"].is_failed)
-    assert_true_after_time(tasks_jobs["task_3"].is_completed)
-    assert_true_after_time(lambda: all([job.is_abandoned() for job in [tasks_jobs["task_1"], tasks_jobs["task_2"]]]))
-    assert_true_after_time(lambda: all(not _Orchestrator._is_blocked(job) for job in jobs))
-    submit_id = jobs[0].submit_id
-    submission = _SubmissionManager._get(submit_id)
-    assert_true_after_time(lambda: submission.submission_status == SubmissionStatus.FAILED)
-
-
-def test_submit_task_in_parallel():
-    m = multiprocessing.Manager()
-    lock = m.Lock()
-
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    task = _create_task(partial(lock_multiply, lock))
-
-    _OrchestratorFactory._build_dispatcher()
-
-    with lock:
-        assert task.output[f"{task.config_id}_output0"].read() == 0
-        job = _Orchestrator.submit_task(task)
-        assert_true_after_time(job.is_running)
-        assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
-        assert_true_after_time(
-            lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.RUNNING
-        )
-
-    assert_true_after_time(lambda: task.output[f"{task.config_id}_output0"].read() == 42)
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
-
-
-def test_submit_sequence_in_parallel():
-    m = multiprocessing.Manager()
-    lock = m.Lock()
-
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    task = _create_task(partial(lock_multiply, lock))
-    sequence = Sequence({}, [task], "sequence_id")
-
-    _OrchestratorFactory._build_dispatcher()
-
-    with lock:
-        assert task.output[f"{task.config_id}_output0"].read() == 0
-        job = _Orchestrator.submit(sequence)[0]
-        assert_true_after_time(job.is_running)
-        assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
-        assert_true_after_time(
-            lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.RUNNING
-        )
-
-    assert_true_after_time(lambda: task.output[f"{task.config_id}_output0"].read() == 42)
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
-
-
-def test_submit_scenario_in_parallel():
-    m = multiprocessing.Manager()
-    lock = m.Lock()
-
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    task = _create_task(partial(lock_multiply, lock))
-    scenario = Scenario("scenario_config", [task], {}, [], "scenario_id")
-
-    _OrchestratorFactory._build_dispatcher()
-
-    with lock:
-        assert task.output[f"{task.config_id}_output0"].read() == 0
-        job = _Orchestrator.submit(scenario)[0]
-        assert_true_after_time(job.is_running)
-        assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
-        assert_true_after_time(
-            lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.RUNNING
-        )
-
-    assert_true_after_time(lambda: task.output[f"{task.config_id}_output0"].read() == 42)
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
-
-
-def sleep_fct(seconds):
-    sleep(seconds)
-
-
-def sleep_and_raise_error_fct(seconds):
-    sleep(seconds)
-    raise Exception
-
-
-def test_submit_task_synchronously_in_parallel():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    sleep_period = 1
-    start_time = datetime.now()
-    task = Task("sleep_task", {}, function=partial(sleep, sleep_period))
-    job = _Orchestrator.submit_task(task, wait=True)
-    assert (datetime.now() - start_time).seconds >= sleep_period
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-
-def test_submit_sequence_synchronously_in_parallel():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    sleep_period = 1
-    start_time = datetime.now()
-    task = Task("sleep_task", {}, function=partial(sleep, sleep_period))
-    sequence = Sequence({}, [task], "sequence_id")
-
-    job = _Orchestrator.submit(sequence, wait=True)[0]
-    assert (datetime.now() - start_time).seconds >= sleep_period
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-
-def test_submit_scenario_synchronously_in_parallel():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    sleep_period = 1
-    start_time = datetime.now()
-    task = Task("sleep_task", {}, function=partial(sleep, sleep_period))
-    scenario = Scenario("scenario_config", [task], {})
-
-    job = _Orchestrator.submit(scenario, wait=True)[0]
-    assert (datetime.now() - start_time).seconds >= sleep_period
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-
-def test_submit_fail_task_synchronously_in_parallel():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    sleep_period = 1.0
-    start_time = datetime.now()
-    task = Task("sleep_task", {}, function=partial(sleep_and_raise_error_fct, sleep_period))
-    job = _Orchestrator.submit_task(task, wait=True)
-    assert (datetime.now() - start_time).seconds >= sleep_period
-    assert_true_after_time(job.is_failed)
-    assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED)
-
-
-def test_submit_fail_sequence_synchronously_in_parallel():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    sleep_period = 1.0
-    start_time = datetime.now()
-    task = Task("sleep_task", {}, function=partial(sleep_and_raise_error_fct, sleep_period))
-    sequence = Sequence({}, [task], "sequence_id")
-
-    job = _Orchestrator.submit(sequence, wait=True)[0]
-    assert (datetime.now() - start_time).seconds >= sleep_period
-    assert_true_after_time(job.is_failed)
-    assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED)
-
-
-def test_submit_fail_scenario_synchronously_in_parallel():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    sleep_period = 1.0
-    start_time = datetime.now()
-    task = Task("sleep_task", {}, function=partial(sleep_and_raise_error_fct, sleep_period))
-    scenario = Scenario("scenario_config", [task], {})
-
-    job = _Orchestrator.submit(scenario, wait=True)[0]
-    assert (datetime.now() - start_time).seconds >= sleep_period
-    assert_true_after_time(job.is_failed)
-    assert_true_after_time(lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.FAILED)
-
-
-def test_submit_task_synchronously_in_parallel_with_timeout():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    _OrchestratorFactory._build_dispatcher()
-
-    task_duration = 2
-    timeout_duration = task_duration - 1
-    task = Task("sleep_task", {}, function=partial(sleep, task_duration))
-
-    start_time = datetime.now()
-    job = _Orchestrator.submit_task(task, wait=True, timeout=timeout_duration)
-    end_time = datetime.now()
-
-    assert timeout_duration <= (end_time - start_time).seconds
-    assert_true_after_time(job.is_completed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-
+@pytest.mark.orchestrator_dispatcher
 def test_submit_task_multithreading_multiple_task():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)

@@ -664,56 +100,8 @@ def test_submit_task_multithreading_multiple_task():
     assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED


-def test_submit_sequence_multithreading_multiple_task():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    m = multiprocessing.Manager()
-    lock_1 = m.Lock()
-    lock_2 = m.Lock()
-
-    task_1 = _create_task(partial(lock_multiply, lock_1))
-    task_2 = _create_task(partial(lock_multiply, lock_2))
-
-    sequence = Sequence({}, [task_1, task_2], "sequence_id")
-
-    _OrchestratorFactory._build_dispatcher()
-
-    with lock_1:
-        with lock_2:
-            tasks_jobs = {job._task.id: job for job in _Orchestrator.submit(sequence)}
-            job_1 = tasks_jobs[task_1.id]
-            job_2 = tasks_jobs[task_2.id]
-
-            assert task_1.output[f"{task_1.config_id}_output0"].read() == 0
-            assert task_2.output[f"{task_2.config_id}_output0"].read() == 0
-            assert_true_after_time(job_1.is_running)
-            assert_true_after_time(job_2.is_running)
-            assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2)
-            assert_true_after_time(
-                lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING
-            )
-
-        assert_true_after_time(lambda: task_2.output[f"{task_2.config_id}_output0"].read() == 42)
-        assert task_1.output[f"{task_1.config_id}_output0"].read() == 0
-        assert_true_after_time(job_2.is_completed)
-        assert_true_after_time(job_1.is_running)
-        assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
-        assert_true_after_time(
-            lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING
-        )
-
-    assert_true_after_time(lambda: task_1.output[f"{task_1.config_id}_output0"].read() == 42)
-    assert_true_after_time(job_1.is_completed)
-    assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-    assert job_2.is_completed()
-    assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED
-
-
-def test_submit_scenario_multithreading_multiple_task():
+@pytest.mark.orchestrator_dispatcher
+def test_submit_submittable_multithreading_multiple_task():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)

     m = multiprocessing.Manager()
@@ -759,8 +147,8 @@ def test_submit_scenario_multithreading_multiple_task():
     )


+@pytest.mark.orchestrator_dispatcher
 def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_status():
-    # TODO
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)

     m = multiprocessing.Manager()
@@ -842,6 +230,7 @@ def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_statu
     assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.COMPLETED


+@pytest.mark.orchestrator_dispatcher
 def test_blocked_task():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)

@@ -874,9 +263,7 @@ def test_blocked_task():
     assert len(_Orchestrator.blocked_jobs) == 1
     with lock_2:
         with lock_1:
-            job_1 = _Orchestrator.submit_task(
-                task_1,
-            )  # job 1 is submitted and locked
+            job_1 = _Orchestrator.submit_task(task_1)  # job 1 is submitted and locked
             assert_true_after_time(job_1.is_running)  # so it is still running
             assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
             assert not _DataManager._get(task_1.bar.id).is_ready_for_reading  # And bar still not ready
@@ -909,63 +296,8 @@ def test_blocked_task():
     )


-def test_blocked_sequence():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    m = multiprocessing.Manager()
-    lock_1 = m.Lock()
-    lock_2 = m.Lock()
-
-    foo_cfg = Config.configure_data_node("foo", default_data=1)
-    bar_cfg = Config.configure_data_node("bar")
-    baz_cfg = Config.configure_data_node("baz")
-
-    _OrchestratorFactory._build_dispatcher()
-
-    dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg])
-    foo = dns[foo_cfg]
-    bar = dns[bar_cfg]
-    baz = dns[baz_cfg]
-    task_1 = Task("by_2", {}, partial(lock_multiply, lock_1, 2), [foo], [bar])
-    task_2 = Task("by_3", {}, partial(lock_multiply, lock_2, 3), [bar], [baz])
-    sequence = Sequence({}, [task_1, task_2], "sequence_id")
-
-    assert task_1.foo.is_ready_for_reading  # foo is ready
-    assert not task_1.bar.is_ready_for_reading  # But bar is not ready
-    assert not task_2.baz.is_ready_for_reading  # neither does baz
-
-    assert len(_Orchestrator.blocked_jobs) == 0
-    with lock_2:
-        with lock_1:
-            jobs = _Orchestrator.submit(sequence)  # sequence is submitted
-            tasks_jobs = {job._task.id: job for job in jobs}
-            job_1, job_2 = tasks_jobs[task_1.id], tasks_jobs[task_2.id]
-            assert_true_after_time(job_1.is_running)  # job 1 is submitted and locked so it is still running
-            assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
-            assert not _DataManager._get(task_1.bar.id).is_ready_for_reading  # And bar still not ready
-            assert_true_after_time(job_2.is_blocked)  # the job_2 remains blocked
-            assert_true_after_time(
-                lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING
-            )
-        assert_true_after_time(job_1.is_completed)  # job1 unlocked and can complete
-        assert _DataManager._get(task_1.bar.id).is_ready_for_reading  # bar becomes ready
-        assert _DataManager._get(task_1.bar.id).read() == 2  # the data is computed and written
-        assert_true_after_time(job_2.is_running)  # And job 2 can start running
-        assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1)
-        assert len(_Orchestrator.blocked_jobs) == 0
-        assert_true_after_time(
-            lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.RUNNING
-        )
-    assert_true_after_time(job_2.is_completed)  # job 2 unlocked so it can complete
-    assert _DataManager._get(task_2.baz.id).is_ready_for_reading  # baz becomes ready
-    assert _DataManager._get(task_2.baz.id).read() == 6  # the data is computed and written
-    assert_true_after_time(lambda: len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-
-def test_blocked_scenario():
+@pytest.mark.orchestrator_dispatcher
+def test_blocked_submittable():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)

     m = multiprocessing.Manager()
@@ -1021,212 +353,7 @@ def test_blocked_scenario():
     )


-def test_task_orchestrator_create_synchronous_dispatcher():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-    _OrchestratorFactory._build_dispatcher()
-
-    assert _OrchestratorFactory._dispatcher._nb_available_workers == 1
-
-
-def test_task_orchestrator_create_standalone_dispatcher():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=3)
-    _OrchestratorFactory._build_dispatcher()
-    assert isinstance(_OrchestratorFactory._dispatcher._executor, ProcessPoolExecutor)
-    assert _OrchestratorFactory._dispatcher._nb_available_workers == 3
-
-
-def modified_config_task(n):
-    from taipy.config import Config
-
-    assert_true_after_time(lambda: Config.core.storage_folder == ".my_data/")
-    assert_true_after_time(lambda: Config.core.custom_property == "custom_property")
-    return n * 2
-
-
-def test_can_exec_task_with_modified_config():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-    Config.configure_core(storage_folder=".my_data/", custom_property="custom_property")
-
-    dn_input_config = Config.configure_data_node("input", "pickle", scope=Scope.SCENARIO, default_data=1)
-    dn_output_config = Config.configure_data_node("output", "pickle")
-    task_config = Config.configure_task("task_config", modified_config_task, dn_input_config, dn_output_config)
-    scenario_config = Config.configure_scenario("scenario_config", [task_config])
-
-    _OrchestratorFactory._build_dispatcher()
-
-    scenario = _ScenarioManager._create(scenario_config)
-
-    jobs = scenario.submit()
-    assert_true_after_time(jobs[0].is_finished, time=120)
-    assert_true_after_time(
-        jobs[0].is_completed
-    )  # If the job is completed, that means the asserts in the task are successful
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.COMPLETED
-    )
-
-
-def update_config_task(n):
-    from taipy.config import Config
-
-    # The exception will be saved to logger, and there is no way to check for it,
-    # so it will be checked here
-    with pytest.raises(ConfigurationUpdateBlocked):
-        Config.core.storage_folder = ".new_storage_folder/"
-    with pytest.raises(ConfigurationUpdateBlocked):
-        Config.core.properties = {"custom_property": "new_custom_property"}
-
-    Config.core.storage_folder = ".new_storage_folder/"
-    Config.core.properties = {"custom_property": "new_custom_property"}
-
-    return n * 2
-
-
-def test_cannot_exec_task_that_update_config():
-    """
-    _ConfigBlocker singleton is not passed to the subprocesses. That means in each subprocess,
-    the config update will not be blocked.
-
-    After rebuilding a new Config in each subprocess, the Config should be blocked.
-    """
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
-    dn_input_config = Config.configure_data_node("input", "pickle", scope=Scope.SCENARIO, default_data=1)
-    dn_output_config = Config.configure_data_node("output", "pickle")
-    task_config = Config.configure_task("task_config", update_config_task, dn_input_config, dn_output_config)
-    scenario_config = Config.configure_scenario("scenario_config", [task_config])
-
-    _OrchestratorFactory._build_dispatcher()
-
-    scenario = _ScenarioManager._create(scenario_config)
-
-    jobs = scenario.submit()
-
-    # The job should fail due to an exception is raised
-    assert_true_after_time(jobs[0].is_failed)
-    assert_true_after_time(
-        lambda: _SubmissionManager._get(jobs[0].submit_id).submission_status == SubmissionStatus.FAILED
-    )
-
-
-def test_can_execute_task_with_development_mode():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    dn_input_config = Config.configure_data_node("input", "pickle", scope=Scope.SCENARIO, default_data=1)
-    dn_output_config = Config.configure_data_node("output", "pickle")
-    task_config = Config.configure_task("task_config", mult_by_2, dn_input_config, dn_output_config)
-    scenario_config = Config.configure_scenario("scenario_config", [task_config])
-
-    _OrchestratorFactory._build_dispatcher()
-
-    scenario = _ScenarioManager._create(scenario_config)
-    scenario.submit()
-    while scenario.output.edit_in_progress:
-        sleep(1)
-    assert 2 == scenario.output.read()
-
-
-def test_need_to_run_no_output():
-    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
-    world_cfg = Config.configure_data_node("world", default_data="world !")
-    task_cfg = Config.configure_task("name", input=[hello_cfg, world_cfg], function=concat, output=[])
-    task = _create_task_from_config(task_cfg)
-
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-
-
-def test_need_to_run_task_not_skippable():
-    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
-    world_cfg = Config.configure_data_node("world", default_data="world !")
-    hello_world_cfg = Config.configure_data_node("hello_world")
-    task_cfg = Config.configure_task(
-        "name", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=False
-    )
-    task = _create_task_from_config(task_cfg)
-
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-
-
-def test_need_to_run_skippable_task_no_input():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    hello_world_cfg = Config.configure_data_node("hello_world")
-    task_cfg = Config.configure_task("name", input=[], function=nothing, output=[hello_world_cfg], skippable=True)
-
-    _OrchestratorFactory._build_dispatcher()
-
-    task = _create_task_from_config(task_cfg)
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-    _Orchestrator.submit_task(task)
-
-    assert not _OrchestratorFactory._dispatcher._needs_to_run(task)
-
-
-def test_need_to_run_skippable_task_no_validity_period_on_output():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
-    world_cfg = Config.configure_data_node("world", default_data="world !")
-    hello_world_cfg = Config.configure_data_node("hello_world")
-    task_cfg = Config.configure_task(
-        "name", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True
-    )
-
-    _OrchestratorFactory._build_dispatcher()
-
-    task = _create_task_from_config(task_cfg)
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-    _Orchestrator.submit_task(task)
-
-    assert not _OrchestratorFactory._dispatcher._needs_to_run(task)
-
-
-def test_need_to_run_skippable_task_with_validity_period_is_valid_on_output():
-    Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
-
-    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
-    world_cfg = Config.configure_data_node("world", default_data="world !")
-    hello_world_cfg = Config.configure_data_node("hello_world", validity_days=1)
-    task_cfg = Config.configure_task(
-        "name", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True
-    )
-    _OrchestratorFactory._build_dispatcher()
-
-    task = _create_task_from_config(task_cfg)
-
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-    job = _Orchestrator.submit_task(task)
-
-    assert not _OrchestratorFactory._dispatcher._needs_to_run(task)
-    job_skipped = _Orchestrator.submit_task(task)
-
-    assert job.is_completed()
-    assert job.is_finished()
-    assert job_skipped.is_skipped()
-    assert job_skipped.is_finished()
-
-
-def test_need_to_run_skippable_task_with_validity_period_obsolete_on_output():
-    hello_cfg = Config.configure_data_node("hello", default_data="Hello ")
-    world_cfg = Config.configure_data_node("world", default_data="world !")
-    hello_world_cfg = Config.configure_data_node("hello_world", validity_days=1)
-    task_cfg = Config.configure_task(
-        "name", input=[hello_cfg, world_cfg], function=concat, output=[hello_world_cfg], skippable=True
-    )
-    task = _create_task_from_config(task_cfg)
-
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-    _Orchestrator.submit_task(task)
-
-    output = task.hello_world
-    output._last_edit_date = datetime.now() - timedelta(days=1, minutes=30)
-    _DataManager()._set(output)
-    assert _OrchestratorFactory._dispatcher._needs_to_run(task)
-
-
 # ################################  UTIL METHODS    ##################################
-
-
 def _create_task(function, nb_outputs=1):
     output_dn_config_id = "".join(random.choice(string.ascii_lowercase) for _ in range(10))
     dn_input_configs = [
@@ -1246,7 +373,3 @@ def _create_task(function, nb_outputs=1):
         input=input_dn,
         output=output_dn,
     )
-
-
-def _create_task_from_config(task_cfg):
-    return _TaskManager()._bulk_get_or_create([task_cfg])[0]
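
The tests kept in this file all rely on the same trick: the task function is bound to a multiprocessing lock via functools.partial (the lock_multiply helper this file still uses), so the test can hold a standalone job in the RUNNING state and assert on intermediate job and submission statuses before releasing it. A minimal sketch of the pattern under the same two-worker standalone configuration these tests use:

    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
    m = multiprocessing.Manager()
    lock = m.Lock()
    task = _create_task(partial(lock_multiply, lock))  # the job blocks on the lock
    _OrchestratorFactory._build_dispatcher()
    with lock:
        job = _Orchestrator.submit_task(task)          # dispatched, but cannot finish
        assert_true_after_time(job.is_running)         # held open for assertions
    assert_true_after_time(job.is_completed)           # lock released, job completes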

+ 178 - 0
tests/core/_orchestrator/test_orchestrator__cancel_jobs.py

@@ -0,0 +1,178 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+from taipy import Status, Job, JobId
+from taipy.config import Config
+from taipy.core import taipy
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+from taipy.core.task._task_manager_factory import _TaskManagerFactory
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def create_job(status):
+    t_cfg = Config.configure_task("no_output", nothing, [], [])
+    t = _TaskManagerFactory._build_manager()._bulk_get_or_create([t_cfg])
+    job = Job(JobId("foo"), t[0], "", "")
+    _JobManagerFactory._build_manager()._set(job)
+    job.status = status
+    return job
+
+
+def create_scenario():
+    # dn_0 --> t1 --> dn_1 --> t2 --> dn_2 --> t3 --> dn_3
+    #                  \
+    #                   \--> t2_bis
+    dn_0 = Config.configure_data_node("dn_0", default_data=0)
+    dn_1 = Config.configure_data_node("dn_1")
+    dn_2 = Config.configure_data_node("dn_2")
+    dn_3 = Config.configure_data_node("dn_3")
+    t1 = Config.configure_task("t1", nothing, [dn_0], [dn_1])
+    t2 = Config.configure_task("t2", nothing, [dn_1], [dn_2])
+    t3 = Config.configure_task("t3", nothing, [dn_2], [dn_3])
+    t2_bis = Config.configure_task("t2bis", nothing, [dn_1], [])
+    sc_conf = Config.configure_scenario("scenario", [t1, t2, t3, t2_bis])
+    return taipy.create_scenario(sc_conf)
+
+
+def test_cancel_job_no_subsequent_jobs():
+    job = create_job(Status.PENDING)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator.cancel_job(job)
+
+    assert job.is_canceled()
+
+
+def test_cancel_job_with_subsequent_blocked_jobs():
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job1 = orchestrator._lock_dn_output_and_create_job(scenario.t1, "s_id", "e_id")
+    job2 = orchestrator._lock_dn_output_and_create_job(scenario.t2, "s_id", "e_id")
+    job3 = orchestrator._lock_dn_output_and_create_job(scenario.t3, "s_id", "e_id")
+    job2bis = orchestrator._lock_dn_output_and_create_job(scenario.t2bis, "s_id", "e_id")
+    job1.pending()
+    job2.blocked()
+    job3.blocked()
+    job2bis.blocked()
+    orchestrator.blocked_jobs = [job2, job3, job2bis]
+
+    orchestrator.cancel_job(job1)
+
+    assert job1.is_canceled()
+    assert job2.is_abandoned()
+    assert job3.is_abandoned()
+    assert job2bis.is_abandoned()
+    assert not scenario.dn_0.edit_in_progress
+    assert not scenario.dn_1.edit_in_progress
+    assert not scenario.dn_2.edit_in_progress
+    assert not scenario.dn_3.edit_in_progress
+    assert orchestrator.blocked_jobs == []
+
+
+def test_cancel_job_with_subsequent_jobs_and_parallel_jobs():
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job1 = orchestrator._lock_dn_output_and_create_job(scenario.t1, "s_id", "e_id")
+    job2 = orchestrator._lock_dn_output_and_create_job(scenario.t2, "s_id", "e_id")
+    job3 = orchestrator._lock_dn_output_and_create_job(scenario.t3, "s_id", "e_id")
+    job2bis = orchestrator._lock_dn_output_and_create_job(scenario.t2bis, "s_id", "e_id")
+    job1.completed()
+
+    job2.running()
+    job3.blocked()
+    job2bis.pending()
+    orchestrator.blocked_jobs = [job3]
+
+    orchestrator.cancel_job(job2)
+
+    assert job1.is_completed()
+    assert job2.is_canceled()
+    assert job3.is_abandoned()
+    assert job2bis.is_pending()
+    assert not scenario.dn_2.edit_in_progress
+    assert not scenario.dn_3.edit_in_progress
+    assert orchestrator.blocked_jobs == []
+
+
+def test_cancel_blocked_job_with_subsequent_blocked_jobs():
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job1 = orchestrator._lock_dn_output_and_create_job(scenario.t1, "s_id", "e_id")
+    job2 = orchestrator._lock_dn_output_and_create_job(scenario.t2, "s_id", "e_id")
+    job3 = orchestrator._lock_dn_output_and_create_job(scenario.t3, "s_id", "e_id")
+    job2bis = orchestrator._lock_dn_output_and_create_job(scenario.t2bis, "s_id", "e_id")
+    job1.blocked()
+    job2.blocked()
+    job3.blocked()
+    job2bis.blocked()
+    orchestrator.blocked_jobs = [job2, job3, job2bis]
+
+    orchestrator.cancel_job(job1)
+
+    assert job1.is_canceled()
+    assert job2.is_abandoned()
+    assert job3.is_abandoned()
+    assert job2bis.is_abandoned()
+    assert not scenario.dn_0.edit_in_progress
+    assert not scenario.dn_1.edit_in_progress
+    assert not scenario.dn_2.edit_in_progress
+    assert not scenario.dn_3.edit_in_progress
+    assert orchestrator.blocked_jobs == []
+
+
+def test_cancel_failed_job():
+    job = create_job(Status.FAILED)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator.cancel_job(job)
+
+    assert not job.is_canceled()
+    assert job.is_failed()
+
+
+def test_cancel_abandoned_job():
+    job = create_job(Status.ABANDONED)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator.cancel_job(job)
+
+    assert not job.is_canceled()
+    assert job.is_abandoned()
+
+
+def test_cancel_canceled_job():
+    job = create_job(Status.CANCELED)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator.cancel_job(job)
+
+    assert job.is_canceled()
+
+
+def test_cancel_completed_job():
+    job = create_job(Status.COMPLETED)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator.cancel_job(job)
+
+    assert job.is_completed()
+
+
+def test_cancel_skipped_job():
+    job = create_job(Status.SKIPPED)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator.cancel_job(job)
+
+    assert job.is_skipped()

+ 156 - 0
tests/core/_orchestrator/test_orchestrator__is_blocked.py

@@ -0,0 +1,156 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+from taipy.config import Config
+from taipy.core import taipy
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def test_is_not_blocked_task_single_input():
+    inp = Config.configure_data_node("inp", default_data="DEFAULT")
+    t = Config.configure_task("the_task", nothing, [inp], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    res = orchestrator._is_blocked(scenario.the_task)
+
+    assert res is False
+
+
+def test_is_not_blocked_task_multiple_input_and_output():
+    dn_0 = Config.configure_data_node("in_0", default_data="THIS")
+    dn_1 = Config.configure_data_node("in_1", default_data="IS")
+    dn_2 = Config.configure_data_node("in_2", default_data="DEFAULT")
+    out = Config.configure_data_node("output")
+    t = Config.configure_task("the_task", nothing, [dn_0, dn_1, dn_2], [out])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    res = orchestrator._is_blocked(scenario.the_task)
+
+    assert res is False
+
+
+def test_is_blocked_task_single_input_no_data():
+    inp = Config.configure_data_node("inp")
+    t = Config.configure_task("the_task", nothing, [inp], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    res = orchestrator._is_blocked(scenario.the_task)
+
+    assert res is True
+
+
+def test_is_blocked_task_single_input_edit_in_progress():
+    input_dn_cfg = Config.configure_data_node("inp", default_data=51)
+    t_cfg = Config.configure_task("the_task", nothing, [input_dn_cfg])
+    sc_conf = Config.configure_scenario("scenario", [t_cfg])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    scenario.inp.lock_edit()
+
+    res = orchestrator._is_blocked(scenario.the_task)
+
+    assert res is True
+
+
+def test_is_blocked_task_multiple_input_no_data():
+    dn_0 = Config.configure_data_node("input_0", default_data="THIS")
+    dn_1 = Config.configure_data_node("input_1")
+    out = Config.configure_data_node("output")
+    t_config = Config.configure_task("the_task", nothing, [dn_0, dn_1], [out])
+    sc_conf = Config.configure_scenario("scenario", [t_config])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    res = orchestrator._is_blocked(scenario.the_task)
+
+    assert res is True
+
+
+def test_is_not_blocked_job_single_input():
+    inp = Config.configure_data_node("inp", default_data="DEFAULT")
+    t = Config.configure_task("the_task", nothing, [inp], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job = _JobManagerFactory._build_manager()._create(scenario.the_task, [nothing], "s_id", "e_id")
+
+    res = orchestrator._is_blocked(job)
+
+    assert res is False
+
+
+def test_is_not_blocked_job_multiple_input_and_output():
+    in_0 = Config.configure_data_node("in_0", default_data="THIS")
+    in_1 = Config.configure_data_node("in_1", default_data="IS")
+    out = Config.configure_data_node("output")
+    t = Config.configure_task("the_task", nothing, [in_0, in_1], [out])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job = _JobManagerFactory._build_manager()._create(scenario.the_task, [nothing], "s_id", "e_id")
+
+    res = orchestrator._is_blocked(job)
+
+    assert res is False
+
+
+def test_is_blocked_job_single_input_no_data():
+    inp = Config.configure_data_node("inp")
+    t = Config.configure_task("the_task", nothing, [inp], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job = _JobManagerFactory._build_manager()._create(scenario.the_task, [nothing], "s_id", "e_id")
+
+    res = orchestrator._is_blocked(job)
+
+    assert res is True
+
+
+def test_is_blocked_job_single_input_edit_in_progress():
+    input_dn_cfg = Config.configure_data_node("inp", default_data="foo")
+    task_cfg = Config.configure_task("the_task", nothing, [input_dn_cfg])
+    sc_conf = Config.configure_scenario("scenario", [task_cfg])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    scenario.inp.lock_edit()
+    job = _JobManagerFactory._build_manager()._create(scenario.the_task, [nothing], "s_id", "e_id")
+
+    res = orchestrator._is_blocked(job)
+
+    assert res is True
+
+
+def test_is_blocked_job_multiple_input_no_data():
+    dn_0 = Config.configure_data_node("in_0", default_data="THIS")
+    dn_1 = Config.configure_data_node("in_1", default_data="IS")
+    dn_2 = Config.configure_data_node("in_2")
+    out = Config.configure_data_node("output")
+    t = Config.configure_task("the_task", nothing, [dn_0, dn_1, dn_2], [out])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job = _JobManagerFactory._build_manager()._create(scenario.the_task, [nothing], "s_id", "e_id")
+
+    res = orchestrator._is_blocked(job)
+
+    assert res is True
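
Read together, these cases pin down a single rule: a task or job is blocked as soon as any of its input data nodes is not ready for reading (no data written yet, or an edit in progress), and an entity with no input is never blocked. The sketch below is a minimal, self-contained model of that rule; FakeDataNode and is_blocked are hypothetical stand-ins written for illustration, not the actual taipy classes.

    from dataclasses import dataclass
    from typing import List


    @dataclass
    class FakeDataNode:
        # Stand-in for a data node: readable once it holds data and no edit is in progress.
        has_data: bool = False
        edit_in_progress: bool = False

        @property
        def is_ready_for_reading(self) -> bool:
            return self.has_data and not self.edit_in_progress


    def is_blocked(inputs: List[FakeDataNode]) -> bool:
        # Blocked as soon as any input is unreadable; no inputs means never blocked.
        return any(not dn.is_ready_for_reading for dn in inputs)


    assert is_blocked([]) is False                             # no input
    assert is_blocked([FakeDataNode(has_data=True)]) is False  # ready input
    assert is_blocked([FakeDataNode()]) is True                # no data yet
    assert is_blocked([FakeDataNode(has_data=True, edit_in_progress=True)]) is True  # locked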

+ 94 - 0
tests/core/_orchestrator/test_orchestrator__lock_dn_output_and_create_job.py

@@ -0,0 +1,94 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+from taipy.config import Config
+from taipy.core import taipy
+from taipy.core._orchestrator._orchestrator import _Orchestrator
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def test_lock_dn_and_create_job():
+    t = Config.configure_task("no_output", nothing, [], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    task = scenario.no_output
+    s_id = "submit_id"
+    entity_id = "scenario_id"
+    cbs = None
+    force = False
+
+    job = _OrchestratorFactory._build_orchestrator()._lock_dn_output_and_create_job(task, s_id, entity_id, cbs, force)
+
+    assert job.submit_id == s_id
+    assert job.submit_entity_id == entity_id
+    assert job.task == task
+    assert not job.force
+    assert len(job._subscribers) == 1
+    assert job._subscribers[0] == _Orchestrator._on_status_change
+    assert len(taipy.get_jobs()) == 1
+
+
+def test_lock_dn_and_create_job_with_callback_and_force():
+    t = Config.configure_task("no_output", nothing, [], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    task = scenario.no_output
+    s_id = "submit_id"
+    entity_id = "scenario_id"
+    cbs = [nothing]
+    force = True
+
+    job = _OrchestratorFactory._build_orchestrator()._lock_dn_output_and_create_job(task, s_id, entity_id, cbs, force)
+
+    assert job.submit_id == s_id
+    assert job.submit_entity_id == entity_id
+    assert job.task == task
+    assert job.force
+    assert len(job._subscribers) == 2
+    assert job._subscribers[0] == nothing
+    assert job._subscribers[1] == _Orchestrator._on_status_change
+    assert len(taipy.get_jobs()) == 1
+
+
+def test_lock_dn_and_create_job_one_output():
+    dn = Config.configure_data_node("output")
+    t = Config.configure_task("one_output", nothing, [], [dn])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    orchestrator._lock_dn_output_and_create_job(scenario.one_output, "submit_id", "scenario_id")
+
+    assert scenario.output.edit_in_progress
+
+
+def test_lock_dn_and_create_job_multiple_outputs_one_input():
+    dn_0 = Config.configure_data_node("input_0", default_data=0)
+    dn_1 = Config.configure_data_node("output_1")
+    dn_2 = Config.configure_data_node("output_2")
+    dn_3 = Config.configure_data_node("output_3")
+    t = Config.configure_task("one_output", nothing, [dn_0], [dn_1, dn_2, dn_3])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    orchestrator._lock_dn_output_and_create_job(scenario.multiple_outputs, "submit_id", "scenario_id")
+
+    assert not scenario.input_0.edit_in_progress
+    assert scenario.input_0.is_ready_for_reading
+    assert scenario.output_1.edit_in_progress
+    assert not scenario.output_1.is_ready_for_reading
+    assert scenario.output_2.edit_in_progress
+    assert not scenario.output_2.is_ready_for_reading
+    assert scenario.output_3.edit_in_progress
+    assert not scenario.output_3.is_ready_for_reading
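
These tests fix two behaviors of _lock_dn_output_and_create_job: every output data node is locked for editing before the job exists, and the job's subscribers are the caller's callbacks followed by _Orchestrator._on_status_change appended last. A rough sketch of that contract, under the assumption that FakeDataNode and FakeJob are illustrative stand-ins rather than the real classes:

    from typing import Callable, List, Optional


    class FakeDataNode:
        def __init__(self) -> None:
            self.edit_in_progress = False

        def lock_edit(self) -> None:
            self.edit_in_progress = True


    class FakeJob:
        def __init__(self, subscribers: List[Callable]) -> None:
            self._subscribers = subscribers


    def on_status_change(job: "FakeJob") -> None:
        pass  # stand-in for _Orchestrator._on_status_change


    def lock_outputs_and_create_job(outputs: List[FakeDataNode],
                                    callbacks: Optional[List[Callable]]) -> FakeJob:
        # Outputs are locked before the job is created, so readers see them as "being written".
        for dn in outputs:
            dn.lock_edit()
        # Caller callbacks come first; the orchestrator's own status hook is appended last.
        return FakeJob((callbacks or []) + [on_status_change])


    out = FakeDataNode()
    job = lock_outputs_and_create_job([out], callbacks=None)
    assert out.edit_in_progress
    assert job._subscribers == [on_status_change]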

+ 165 - 0
tests/core/_orchestrator/test_orchestrator__on_status_change.py

@@ -0,0 +1,165 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+from unittest import mock
+
+import taipy
+from taipy import Status, Job, JobId
+from taipy.config import Config
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+from taipy.core.task._task_manager_factory import _TaskManagerFactory
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def create_job(id, status):
+    t_cfg = Config.configure_task("no_output", nothing, [], [])
+    t = _TaskManagerFactory._build_manager()._bulk_get_or_create([t_cfg])
+    job = Job(JobId(id), t[0], "", "")
+    _JobManagerFactory._build_manager()._set(job)
+    job.status = status
+    return job
+
+
+def create_job_from_task(id, task):
+    job = Job(JobId(id), task, "s", task.id)
+    _JobManagerFactory._build_manager()._set(job)
+    return job
+
+
+def create_scenario():
+    # dn_0 --> t1 --> dn_1 --> t2 --> dn_2
+    #  \
+    #   \--> t3
+    dn_0_cfg = Config.configure_pickle_data_node("dn_0")
+    dn_1_cfg = Config.configure_pickle_data_node("dn_1")
+    dn_2_cfg = Config.configure_pickle_data_node("dn_2")
+    t1_cfg = Config.configure_task("t1", nothing, [dn_0_cfg], [dn_1_cfg])
+    t2_cfg = Config.configure_task("t2", nothing, [dn_1_cfg], [dn_2_cfg])
+    t3_cfg = Config.configure_task("t3", nothing, [dn_0_cfg], [])
+    sc_conf = Config.configure_scenario("scenario_cfg", [t2_cfg, t1_cfg, t3_cfg])
+    return taipy.create_scenario(sc_conf)
+
+
+def test_on_status_change_on_running_job_does_nothing():
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job_1_blocked = create_job("1_blocked", Status.BLOCKED)
+    job_2_to_be_unblocked = create_job("to_be_unblocked", Status.BLOCKED)
+    job_3_blocked = create_job("3_blocked", Status.BLOCKED)
+    job_4_running = create_job("running_job", Status.RUNNING)
+    orchestrator.blocked_jobs.append(job_1_blocked)
+    orchestrator.blocked_jobs.append(job_2_to_be_unblocked)
+    orchestrator.blocked_jobs.append(job_3_blocked)
+
+    with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator._is_blocked") as mck:
+        orchestrator._on_status_change(job_4_running)
+
+        mck.assert_not_called()
+        assert job_1_blocked in orchestrator.blocked_jobs
+        assert job_1_blocked.is_blocked()
+        assert job_2_to_be_unblocked in orchestrator.blocked_jobs
+        assert job_2_to_be_unblocked.is_blocked()
+        assert job_3_blocked in orchestrator.blocked_jobs
+        assert job_3_blocked.is_blocked()
+        assert job_4_running.is_running()
+        assert len(orchestrator.blocked_jobs) == 3
+        assert orchestrator.jobs_to_run.qsize() == 0
+
+
+def test_on_status_change_on_completed_job():
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job_1_blocked = create_job("1_blocked", Status.BLOCKED)
+    job_2_to_be_unblocked = create_job("to_be_unblocked", Status.BLOCKED)
+    job_3_blocked = create_job("3_blocked", Status.BLOCKED)
+    job_4_completed = create_job("completed_job", Status.COMPLETED)
+    orchestrator.blocked_jobs.append(job_1_blocked)
+    orchestrator.blocked_jobs.append(job_2_to_be_unblocked)
+    orchestrator.blocked_jobs.append(job_3_blocked)
+
+    def mck_is_blocked(job):
+        if job.id == "to_be_unblocked":
+            return False
+        return True
+
+    with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator._is_blocked") as mck:
+        mck.side_effect = mck_is_blocked
+        orchestrator._on_status_change(job_4_completed)
+
+        assert job_1_blocked in orchestrator.blocked_jobs
+        assert job_1_blocked.is_blocked()
+        assert job_2_to_be_unblocked not in orchestrator.blocked_jobs
+        assert job_2_to_be_unblocked.is_pending()
+        assert job_3_blocked in orchestrator.blocked_jobs
+        assert job_3_blocked.is_blocked()
+        assert job_4_completed.is_completed()
+        assert len(orchestrator.blocked_jobs) == 2
+        assert orchestrator.jobs_to_run.qsize() == 1
+        assert orchestrator.jobs_to_run.get() == job_2_to_be_unblocked
+
+
+def test_on_status_change_on_skipped_job():
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job_1_blocked = create_job("1_blocked", Status.BLOCKED)
+    job_2_to_be_unblocked = create_job("to_be_unblocked", Status.BLOCKED)
+    job_3_blocked = create_job("3_blocked", Status.BLOCKED)
+    job_4_skipped = create_job("skipped_job", Status.SKIPPED)
+    orchestrator.blocked_jobs.append(job_1_blocked)
+    orchestrator.blocked_jobs.append(job_2_to_be_unblocked)
+    orchestrator.blocked_jobs.append(job_3_blocked)
+
+    def mck_is_blocked(job):
+        if job.id == "to_be_unblocked":
+            return False
+        return True
+
+    with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator._is_blocked") as mck:
+        mck.side_effect = mck_is_blocked
+
+        orchestrator._on_status_change(job_4_skipped)
+
+        # Assert that when the status is skipped, the unblock jobs mechanism is executed
+        assert job_1_blocked in orchestrator.blocked_jobs
+        assert job_1_blocked.is_blocked()
+        assert job_2_to_be_unblocked not in orchestrator.blocked_jobs
+        assert job_2_to_be_unblocked.is_pending()
+        assert job_3_blocked in orchestrator.blocked_jobs
+        assert job_3_blocked.is_blocked()
+        assert job_4_skipped.is_skipped()
+        assert len(orchestrator.blocked_jobs) == 2
+        assert orchestrator.jobs_to_run.qsize() == 1
+        assert orchestrator.jobs_to_run.get() == job_2_to_be_unblocked
+
+
+def test_on_status_change_on_failed_job():
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    scenario = create_scenario()
+    j1 = create_job_from_task("j1", scenario.t1)
+    j1.status = Status.FAILED
+    j2 = create_job_from_task("j2", scenario.t2)
+    j2.status = Status.BLOCKED
+    j3 = create_job_from_task("j3", scenario.t3)
+    j3.status = Status.BLOCKED
+    orchestrator.blocked_jobs.append(j2)
+    orchestrator.blocked_jobs.append(j3)
+
+    orchestrator._on_status_change(j1)
+
+    # Assert that when a job fails, its downstream blocked jobs are abandoned while unrelated ones stay blocked
+    assert j1.is_failed()
+    assert j2 not in orchestrator.blocked_jobs
+    assert j2.is_abandoned()
+    assert j3 in orchestrator.blocked_jobs
+    assert j3.is_blocked()
+    assert len(orchestrator.blocked_jobs) == 1
+    assert orchestrator.jobs_to_run.qsize() == 0
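
The tests above cover the three interesting branches of _on_status_change: a RUNNING job changes nothing; a COMPLETED or SKIPPED job triggers the unblock pass, moving newly runnable jobs from blocked_jobs into jobs_to_run as PENDING; a FAILED job abandons only the blocked jobs that transitively consume its outputs, leaving unrelated blocked jobs untouched. The model below is a hypothetical reconstruction of those branches (FakeJob carries just enough structure to replay the failed-job case), not the orchestrator's actual code:

    from queue import SimpleQueue
    from typing import List, Set


    class FakeJob:
        def __init__(self, status: str, inputs: Set[str] = frozenset(),
                     outputs: Set[str] = frozenset(), ready: bool = False) -> None:
            self.status = status
            self.inputs = set(inputs)    # config ids of the task's input data nodes
            self.outputs = set(outputs)  # config ids of the task's output data nodes
            self.ready = ready           # stand-in for "not _is_blocked(job)"


    def on_status_change(job: FakeJob, blocked_jobs: List[FakeJob], jobs_to_run: SimpleQueue) -> None:
        if job.status in ("COMPLETED", "SKIPPED"):
            # A finished job may have produced the data that unblocks others.
            for b in list(blocked_jobs):
                if b.ready:
                    b.status = "PENDING"
                    blocked_jobs.remove(b)
                    jobs_to_run.put(b)
        elif job.status == "FAILED":
            # Abandon only jobs that (transitively) consume the failed task's outputs.
            dead, progress = set(job.outputs), True
            while progress:
                progress = False
                for b in list(blocked_jobs):
                    if b.inputs & dead:
                        b.status = "ABANDONED"
                        blocked_jobs.remove(b)
                        dead |= b.outputs
                        progress = True
        # Any other status (e.g. RUNNING) changes nothing.


    # Same shape as the failed-job test: t1 --> dn_1 --> t2, while t3 reads dn_0 only.
    j1 = FakeJob("FAILED", inputs={"dn_0"}, outputs={"dn_1"})
    j2 = FakeJob("BLOCKED", inputs={"dn_1"}, outputs={"dn_2"})
    j3 = FakeJob("BLOCKED", inputs={"dn_0"})
    blocked: List[FakeJob] = [j2, j3]
    on_status_change(j1, blocked, SimpleQueue())
    assert j2.status == "ABANDONED" and j3.status == "BLOCKED" and blocked == [j3]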

+ 78 - 0
tests/core/_orchestrator/test_orchestrator__orchestrate_job_to_run_or_block.py

@@ -0,0 +1,78 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+from taipy import Status
+from taipy.config import Config
+from taipy.core import taipy
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.job._job_manager_factory import _JobManagerFactory
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def test_orchestrate_job_to_run_or_block_single_blocked_job():
+    inp = Config.configure_data_node("inp")  # No default data
+    t = Config.configure_task("the_task", nothing, [inp], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job = _JobManagerFactory._build_manager()._create(scenario.the_task, [nothing], "s_id", "e_id")
+
+    orchestrator._orchestrate_job_to_run_or_block([job])
+
+    assert len(orchestrator.blocked_jobs) == 1
+    assert job.status == Status.BLOCKED
+    assert orchestrator.jobs_to_run.empty()
+
+
+def test_orchestrate_job_to_run_or_block_single_pending_job():
+    inp = Config.configure_data_node("inp", default_data=1)  # Has default data
+    t = Config.configure_task("my_task", nothing, [inp], [])
+    sc_conf = Config.configure_scenario("scenario", [t])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job = _JobManagerFactory._build_manager()._create(scenario.my_task, [nothing], "s_id", "e_id")
+
+    orchestrator._orchestrate_job_to_run_or_block([job])
+
+    assert len(orchestrator.blocked_jobs) == 0
+    assert job.status == Status.PENDING
+    assert orchestrator.jobs_to_run.qsize() == 1
+
+
+def test_orchestrate_job_to_run_or_block_multiple_jobs():
+    input = Config.configure_data_node("input", default_data=1)  # Has default data
+    intermediate = Config.configure_data_node("intermediate")  # No default data
+    output = Config.configure_data_node("output")  # No default data
+    t1 = Config.configure_task("my_task_1", nothing, [input], [])
+    t2 = Config.configure_task("my_task_2", nothing, [], [intermediate])
+    t3 = Config.configure_task("my_task_3", nothing, [intermediate], [output])
+    sc_conf = Config.configure_scenario("scenario", [t1, t2, t3])
+    scenario = taipy.create_scenario(sc_conf)
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    job_1 = _JobManagerFactory._build_manager()._create(scenario.my_task_1, [nothing], "s_id", "e_id")
+    job_2 = _JobManagerFactory._build_manager()._create(scenario.my_task_2, [nothing], "s_id", "e_id")
+    job_3 = _JobManagerFactory._build_manager()._create(scenario.my_task_3, [nothing], "s_id", "e_id")
+
+    orchestrator._orchestrate_job_to_run_or_block([job_1, job_2, job_3])
+
+    assert orchestrator.jobs_to_run.qsize() == 2
+    assert job_1.status == Status.PENDING
+    assert job_2.status == Status.PENDING
+    assert len(orchestrator.blocked_jobs) == 1
+    assert job_3.status == Status.BLOCKED
+
+
+def test_orchestrate_job_to_run_or_block__no_job_does_not_raise_error():
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    orchestrator._orchestrate_job_to_run_or_block([])
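
The routing rule these tests assert is deliberately small: each freshly created job either lands in blocked_jobs with status BLOCKED or in the jobs_to_run queue with status PENDING, and an empty submission is a no-op. A sketch of that dispatch loop, with a hypothetical FakeJob instead of the orchestrator's real job type:

    from queue import SimpleQueue
    from typing import List


    class FakeJob:
        def __init__(self, is_blocked: bool) -> None:
            self.is_blocked = is_blocked  # stand-in for _Orchestrator._is_blocked(job)
            self.status = "SUBMITTED"


    def orchestrate(jobs: List[FakeJob], blocked_jobs: List[FakeJob],
                    jobs_to_run: SimpleQueue) -> None:
        for job in jobs:
            if job.is_blocked:
                job.status = "BLOCKED"
                blocked_jobs.append(job)
            else:
                job.status = "PENDING"
                jobs_to_run.put(job)


    blocked_list: List[FakeJob] = []
    run_queue: SimpleQueue = SimpleQueue()
    orchestrate([FakeJob(False), FakeJob(False), FakeJob(True)], blocked_list, run_queue)
    assert run_queue.qsize() == 2 and len(blocked_list) == 1
    orchestrate([], blocked_list, run_queue)  # empty list is a no-op, as the last test checks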

+ 486 - 0
tests/core/_orchestrator/test_orchestrator__submit.py

@@ -0,0 +1,486 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+from datetime import datetime, timedelta
+from unittest import mock
+
+import freezegun
+import pytest
+
+from taipy import Scope, Task, Scenario
+from taipy.config import Config
+from taipy.core import taipy
+from taipy.core._orchestrator._orchestrator import _Orchestrator
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.config import JobConfig
+from taipy.core.data import PickleDataNode
+from taipy.core.data._data_manager import _DataManager
+from taipy.core.scenario._scenario_manager import _ScenarioManager
+from taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
+from taipy.core.submission.submission import Submission
+from taipy.core.submission.submission_status import SubmissionStatus
+from taipy.core.task._task_manager import _TaskManager
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def create_scenario():
+    # dn_0 --> t1 --> dn_1 --> t2 --> dn_2 --> t3 --> dn_3
+    #                  \
+    #                   \--> t2_bis
+    dn_0_cfg = Config.configure_pickle_data_node("dn_0")
+    dn_1_cfg = Config.configure_pickle_data_node("dn_1")
+    dn_2_cfg = Config.configure_pickle_data_node("dn_2")
+    dn_3_cfg = Config.configure_pickle_data_node("dn_3")
+    t1_cfg = Config.configure_task("t_1", nothing, [dn_0_cfg], [dn_1_cfg], skippable=True)
+    t2_cfg = Config.configure_task("t_2", nothing, [dn_1_cfg], [dn_2_cfg])
+    t3_cfg = Config.configure_task("t_3", nothing, [dn_2_cfg], [dn_3_cfg])
+    t2_bis_cfg = Config.configure_task("t_2bis", nothing, [dn_1_cfg], [])
+    sc_conf = Config.configure_scenario("scenario_cfg", [t2_cfg, t1_cfg, t3_cfg, t2_bis_cfg])
+    return taipy.create_scenario(sc_conf)
+
+
+def test_submit_scenario_development_mode():
+    scenario = create_scenario()
+    scenario.dn_0.write(0)  # input data is made ready
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now() + timedelta(seconds=1)  # +1 to ensure the edit time of dn_0 is before the submit time
+    with freezegun.freeze_time(submit_time):
+        jobs = orchestrator.submit(scenario)  # scenario is executed directly in development mode
+
+    # data nodes should have been written (except the input dn_0)
+    assert scenario.dn_0.last_edit_date < submit_time
+    assert scenario.dn_1.last_edit_date == submit_time
+    assert scenario.dn_2.last_edit_date == submit_time
+    assert scenario.dn_3.last_edit_date == submit_time
+
+    # jobs are created in a specific order and are correct
+    assert len(jobs) == 4
+    # t1
+    job_1 = jobs[0]
+    assert job_1.task == scenario.t_1
+    assert not job_1.force
+    assert job_1.is_completed()
+    assert job_1.submit_entity_id == scenario.id
+    assert job_1.creation_date == submit_time
+    assert job_1.stacktrace == []
+    assert len(job_1._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_1._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_1._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    # t2 or t2_bis
+    job_2 = jobs[1]
+    assert job_2.task == scenario.t_2 or job_2.task == scenario.t_2bis
+    assert not job_2.force
+    assert job_2.is_completed()
+    assert job_2.submit_entity_id == scenario.id
+    assert job_2.creation_date == submit_time
+    assert job_2.stacktrace == []
+    assert len(job_2._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_2._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    # t2_bis or t2
+    job_2bis = jobs[2]
+    assert job_2bis.task == scenario.t_2bis or job_2bis.task == scenario.t_2
+    assert job_2bis.is_completed()
+    assert not job_2bis.force
+    assert job_2bis.submit_entity_id == scenario.id
+    assert job_2bis.creation_date == submit_time
+    assert len(job_2bis._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2bis._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_2bis._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_2bis.stacktrace == []
+    # t3
+    job_3 = jobs[3]
+    assert job_3.task == scenario.t_3
+    assert not job_3.force
+    assert job_3.is_completed()
+    assert job_3.submit_entity_id == scenario.id
+    assert job_3.creation_date == submit_time
+    assert len(job_3._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_3._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_3._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_3.stacktrace == []
+
+    assert job_1.submit_id == job_2.submit_id == job_2bis.submit_id == job_3.submit_id
+
+    # submission is created and correct
+    submission = _SubmissionManagerFactory._build_manager()._get(job_1.submit_id)
+    assert len(_SubmissionManagerFactory._build_manager()._get_all()) == 1
+    assert submission.submission_status == SubmissionStatus.COMPLETED
+    assert submission.jobs == jobs
+    assert submission.creation_date == submit_time
+    assert submission.entity_id == scenario.id
+    assert submission.entity_type == "SCENARIO"
+    assert submission.entity_config_id == "scenario_cfg"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 0
+    assert orchestrator.jobs_to_run.qsize() == 0
+
+
+def test_submit_scenario_development_mode_blocked_jobs():
+    scenario = create_scenario()  # input data is not ready
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    s_time = datetime.now() + timedelta(seconds=1)  # +1 to ensure the scenario creation is before the submit time
+    with freezegun.freeze_time(s_time):
+        jobs = orchestrator.submit(scenario)  # first task is blocked because input is not ready
+
+    # output data nodes should be locked for editing
+    assert scenario.dn_1.edit_in_progress
+    assert scenario.dn_2.edit_in_progress
+    assert scenario.dn_3.edit_in_progress
+
+    # jobs are created in a specific order and are correct
+    assert len(jobs) == 4
+    # t1
+    job_1 = jobs[0]
+    assert job_1.task == scenario.t_1
+    assert not job_1.force
+    assert job_1.is_blocked()
+    assert job_1.submit_entity_id == scenario.id
+    assert job_1.creation_date == s_time
+    assert job_1.stacktrace == []
+    assert len(job_1._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_1._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_1._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    # t2 or t2_bis
+    job_2 = jobs[1]
+    assert job_2.task == scenario.t_2 or job_2.task == scenario.t_2bis
+    assert not job_2.force
+    assert job_2.is_blocked()
+    assert job_2.submit_entity_id == scenario.id
+    assert job_2.creation_date == s_time
+    assert job_2.stacktrace == []
+    assert len(job_2._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_2._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    # t2_bis or t2
+    job_2bis = jobs[2]
+    assert job_2bis.task == scenario.t_2bis or job_2bis.task == scenario.t_2
+    assert job_2bis.is_blocked()
+    assert job_2bis.submit_entity_id == scenario.id
+    assert not job_2bis.force
+    assert job_2bis.creation_date == s_time
+    assert len(job_2bis._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2bis._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_2bis._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_2bis.stacktrace == []
+    # t3
+    job_3 = jobs[3]
+    assert job_3.task == scenario.t_3
+    assert not job_3.force
+    assert job_3.is_blocked()
+    assert job_3.submit_entity_id == scenario.id
+    assert job_3.creation_date == s_time
+    assert job_3.stacktrace == []
+    assert len(job_3._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_3._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_3._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+
+    # Same submit_id
+    assert job_1.submit_id == job_2.submit_id == job_2bis.submit_id == job_3.submit_id
+
+    # submission is created and correct
+    assert len(_SubmissionManagerFactory._build_manager()._get_all()) == 1
+    submission = _SubmissionManagerFactory._build_manager()._get(job_1.submit_id)
+    assert submission.submission_status == SubmissionStatus.BLOCKED
+    assert submission.jobs == jobs
+    assert submission.creation_date == s_time
+    assert submission.entity_id == scenario.id
+    assert submission.entity_type == "SCENARIO"
+    assert submission.entity_config_id == "scenario_cfg"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 4
+    assert orchestrator.jobs_to_run.qsize() == 0
+
+
+@pytest.mark.standalone
+def test_submit_scenario_standalone_mode():
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    sc = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+    sc.dn_0.write(0)  # input data is made ready
+    submit_time = datetime.now() + timedelta(seconds=1)  # +1 to ensure the edit time of dn_0 is before the submit time
+    with freezegun.freeze_time(submit_time):
+        jobs = orchestrator.submit(sc)  # No dispatcher running. sc is not executed.
+
+    # task outputs should be locked for editing
+    assert sc.dn_1.edit_in_progress
+    assert sc.dn_2.edit_in_progress
+    assert sc.dn_3.edit_in_progress
+
+    # jobs are created in a specific order and are correct
+    assert len(jobs) == 4
+    # t1
+    job_1 = jobs[0]
+    assert job_1.task == sc.t_1
+    assert not job_1.force
+    assert job_1.is_pending()
+    assert job_1.creation_date == submit_time
+    assert job_1.submit_entity_id == sc.id
+    assert len(job_1._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_1._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_1._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_1.stacktrace == []
+    # t2 or t2_bis
+    job_2 = jobs[1]
+    assert job_2.task == sc.t_2 or job_2.task == sc.t_2bis
+    assert job_2.is_blocked()
+    assert not job_2.force
+    assert job_2.submit_entity_id == sc.id
+    assert job_2.creation_date == submit_time
+    assert job_2.stacktrace == []
+    assert len(job_2._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_2._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    # t2_bis or t2
+    job_2bis = jobs[2]
+    assert job_2bis.task == sc.t_2bis or job_2bis.task == sc.t_2
+    assert job_2bis.is_blocked()
+    assert not job_2bis.force
+    assert job_2bis.submit_entity_id == sc.id
+    assert len(job_2bis._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2bis._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_2bis._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_2bis.creation_date == submit_time
+    assert job_2bis.stacktrace == []
+    # t3
+    job_3 = jobs[3]
+    assert job_3.task == sc.t_3
+    assert not job_3.force
+    assert job_3.is_blocked()
+    assert job_3.submit_entity_id == sc.id
+    assert len(job_3._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_3._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_3._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_3.creation_date == submit_time
+    assert job_3.stacktrace == []
+
+    assert job_1.submit_id == job_2.submit_id == job_2bis.submit_id == job_3.submit_id
+
+    # submission is created and correct
+    submission = _SubmissionManagerFactory._build_manager()._get(job_1.submit_id)
+    assert len(_SubmissionManagerFactory._build_manager()._get_all()) == 1
+    assert submission.submission_status == SubmissionStatus.PENDING
+    assert submission.jobs == jobs
+    assert submission.creation_date == submit_time
+    assert submission.entity_id == sc.id
+    assert submission.entity_type == "SCENARIO"
+    assert submission.entity_config_id == "scenario_cfg"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 3
+    assert orchestrator.jobs_to_run.qsize() == 1
+
+
+@pytest.mark.standalone
+def test_submit_scenario_with_callbacks_and_force_and_wait():
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator._wait_until_job_finished") as mck:
+        jobs = orchestrator.submit(scenario, callbacks=[nothing], force=True, wait=True, timeout=5)
+
+        # jobs are created in a specific order and are correct
+        assert len(jobs) == 4
+        assert len(jobs[0]._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+        assert jobs[0]._subscribers[0].__code__ == nothing.__code__
+        assert jobs[0]._subscribers[1].__code__ == Submission._update_submission_status.__code__
+        assert jobs[0]._subscribers[2].__code__ == _Orchestrator._on_status_change.__code__
+        assert len(jobs[1]._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+        assert jobs[1]._subscribers[0].__code__ == nothing.__code__
+        assert jobs[1]._subscribers[1].__code__ == Submission._update_submission_status.__code__
+        assert jobs[1]._subscribers[2].__code__ == _Orchestrator._on_status_change.__code__
+        assert len(jobs[2]._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+        assert jobs[2]._subscribers[0].__code__ == nothing.__code__
+        assert jobs[2]._subscribers[1].__code__ == Submission._update_submission_status.__code__
+        assert jobs[2]._subscribers[2].__code__ == _Orchestrator._on_status_change.__code__
+        mck.assert_called_once_with(jobs, timeout=5)
+
+
+def test_submit_sequence_development_mode():
+    sce = create_scenario()
+    sce.add_sequence("seq", [sce.t_1, sce.t_2, sce.t_3])
+    seq = sce.sequences["seq"]
+    sce.dn_0.write(0)  # input data is made ready
+
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now() + timedelta(seconds=1)  # +1 to ensure the edit time of dn_0 is before the submit time
+    with freezegun.freeze_time(submit_time):
+        jobs = orchestrator.submit(seq)  # sequence is executed directly in development mode
+
+    # data nodes should have been written (except the input dn_0)
+    assert sce.dn_0.last_edit_date < submit_time
+    assert sce.dn_1.last_edit_date == submit_time == sce.dn_2.last_edit_date == sce.dn_3.last_edit_date
+
+    # jobs are created in a specific order and are correct
+    assert len(jobs) == 3
+    # t1
+    job_1 = jobs[0]
+    assert job_1.task == sce.t_1
+    assert not job_1.force
+    assert job_1.is_completed()
+    assert job_1.submit_entity_id == seq.id
+    assert job_1.creation_date == submit_time
+    assert job_1.stacktrace == []
+    assert len(job_1._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_1._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_1._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    # t2
+    job_2 = jobs[1]
+    assert job_2.task == sce.t_2
+    assert not job_2.force
+    assert job_2.is_completed()
+    assert job_2.submit_entity_id == seq.id
+    assert job_2.creation_date == submit_time
+    assert job_2.stacktrace == []
+    assert len(job_2._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_2._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_2._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    # t3
+    job_3 = jobs[2]
+    assert job_3.task == sce.t_3
+    assert not job_3.force
+    assert job_3.is_completed()
+    assert job_3.submit_entity_id == seq.id
+    assert len(job_3._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_3._subscribers[0].__code__ == Submission._update_submission_status.__code__
+    assert job_3._subscribers[1].__code__ == _Orchestrator._on_status_change.__code__
+    assert job_3.creation_date == submit_time
+    assert job_3.stacktrace == []
+
+    assert job_1.submit_id == job_2.submit_id == job_3.submit_id
+
+    # submission is created and correct
+    submit_id = job_2.submit_id
+    submission = _SubmissionManagerFactory._build_manager()._get(submit_id)
+    assert len(_SubmissionManagerFactory._build_manager()._get_all()) == 1
+    assert submission.entity_type == "SEQUENCE"
+    assert submission.submission_status == SubmissionStatus.COMPLETED
+    assert submission.entity_config_id is None
+    assert submission.jobs == jobs
+    assert submission.creation_date == submit_time
+    assert submission.entity_id == seq.id
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 0
+    assert orchestrator.jobs_to_run.qsize() == 0
+
+
+@pytest.mark.standalone
+def test_submit_sequence_standalone_mode():
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    scenario = create_scenario()
+    scenario.dn_0.write(0)  # input data is made ready
+    scenario.add_sequence("seq", [scenario.t_1, scenario.t_2, scenario.t_3])
+    sequence = scenario.sequences["seq"]
+
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now() + timedelta(seconds=1)  # +1 to ensure the edit time of dn_0 is before the submit time
+    with freezegun.freeze_time(submit_time):
+        jobs = orchestrator.submit(sequence)  # No dispatcher running. The sequence is not executed.
+
+    assert scenario.dn_1.edit_in_progress
+    assert scenario.dn_2.edit_in_progress
+    assert scenario.dn_3.edit_in_progress
+
+    # jobs are created in a specific order and are correct
+    assert len(jobs) == 3
+    # t1
+    job_1 = jobs[0]
+    assert job_1.task == scenario.t_1
+    assert not job_1.force
+    assert job_1.is_pending()
+    assert job_1.creation_date == submit_time
+    assert job_1.submit_entity_id == sequence.id
+    assert job_1.stacktrace == []
+    assert len(job_1._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    # t2
+    job_2 = jobs[1]
+    assert job_2.task == scenario.t_2
+    assert not job_2.force
+    assert job_2.is_blocked()
+    assert job_2.submit_entity_id == sequence.id
+    assert job_2.creation_date == submit_time
+    assert job_2.stacktrace == []
+    assert len(job_2._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    # t3
+    job_3 = jobs[2]
+    assert job_3.task == scenario.t_3
+    assert not job_3.force
+    assert job_3.is_blocked()
+    assert job_3.creation_date == submit_time
+    assert job_3.submit_entity_id == sequence.id
+    assert len(job_3._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job_3.stacktrace == []
+
+    assert job_1.submit_id == job_2.submit_id == job_3.submit_id
+
+    # submission is created and correct
+    submit_id = job_2.submit_id
+    submission = _SubmissionManagerFactory._build_manager()._get(submit_id)
+    assert len(_SubmissionManagerFactory._build_manager()._get_all()) == 1
+    assert submission.submission_status == SubmissionStatus.PENDING
+    assert submission.entity_type == "SEQUENCE"
+    assert submission.entity_config_id is None
+    assert submission.jobs == jobs
+    assert submission.creation_date == submit_time
+    assert submission.entity_id == sequence.id
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 2
+    assert orchestrator.jobs_to_run.qsize() == 1
+
+
+@pytest.mark.standalone
+def test_submit_sequence_with_callbacks_and_force_and_wait():
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    scenario = create_scenario()
+    scenario.add_sequence("seq", [scenario.t_1, scenario.t_2, scenario.t_3])
+    sequence = scenario.sequences["seq"]
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator._wait_until_job_finished") as mck:
+        jobs = orchestrator.submit(sequence, callbacks=[nothing], force=True, wait=True, timeout=5)
+        mck.assert_called_once_with(jobs, timeout=5)
+
+    # jobs are created in a specific order and are correct
+    assert len(jobs) == 3
+    assert len(jobs[0]._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+    assert len(jobs[1]._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+    assert len(jobs[2]._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+
+
+def test_submit_submittable_generate_unique_submit_id():
+    dn_1 = PickleDataNode("dn_config_id_1", Scope.SCENARIO)
+    dn_2 = PickleDataNode("dn_config_id_2", Scope.SCENARIO)
+    task_1 = Task("task_config_id_1", {}, print, [dn_1])
+    task_2 = Task("task_config_id_2", {}, print, [dn_1], [dn_2])
+
+    _DataManager._set(dn_1)
+    _DataManager._set(dn_2)
+    _TaskManager._set(task_1)
+    _TaskManager._set(task_2)
+
+    scenario = Scenario("scenario", {task_1, task_2}, {})
+    _ScenarioManager._set(scenario)
+
+    jobs_1 = taipy.submit(scenario)
+    jobs_2 = taipy.submit(scenario)
+    assert len(jobs_1) == 2
+    assert len(jobs_2) == 2
+    assert jobs_1[0].submit_id == jobs_1[1].submit_id
+    assert jobs_2[0].submit_id == jobs_2[1].submit_id
+    assert jobs_1[0].submit_id != jobs_2[0].submit_id
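
Across these tests, the submission status follows directly from the statuses of its jobs: all jobs completed gives COMPLETED, all jobs blocked gives BLOCKED, and a single pending job is enough for PENDING. A rough model covering only the three combinations asserted here; the real SubmissionStatus machine also handles states such as RUNNING, FAILED, and CANCELED, which these tests do not exercise:

    def submission_status(job_statuses: list) -> str:
        # Precedence observed in the assertions above: in-progress beats blocked,
        # blocked beats completed.
        if any(s == "RUNNING" for s in job_statuses):
            return "RUNNING"
        if any(s == "PENDING" for s in job_statuses):
            return "PENDING"
        if any(s == "BLOCKED" for s in job_statuses):
            return "BLOCKED"
        return "COMPLETED"


    assert submission_status(["COMPLETED"] * 4) == "COMPLETED"  # development-mode run
    assert submission_status(["BLOCKED"] * 4) == "BLOCKED"      # inputs not ready
    assert submission_status(["PENDING", "BLOCKED", "BLOCKED", "BLOCKED"]) == "PENDING"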

+ 205 - 0
tests/core/_orchestrator/test_orchestrator__submit_task.py

@@ -0,0 +1,205 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+from datetime import datetime
+from unittest import mock
+
+import freezegun
+import pytest
+
+from taipy.config import Config
+from taipy.core import taipy
+from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
+from taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
+from taipy.core.submission.submission_status import SubmissionStatus
+
+
+def nothing(*args, **kwargs):
+    pass
+
+
+def create_scenario():
+    # dn_0 --> t1 --> dn_1 --> t2 --> dn_2 --> t3 --> dn_3
+    #                  \
+    #                   \--> t2_bis
+    dn_0 = Config.configure_data_node("dn_0", default_data=0)
+    dn_1 = Config.configure_data_node("dn_1")
+    dn_2 = Config.configure_data_node("dn_2")
+    dn_3 = Config.configure_data_node("dn_3")
+    t1 = Config.configure_task("t1", nothing, [dn_0], [dn_1], skippable=True)
+    t2 = Config.configure_task("t2", nothing, [dn_1], [dn_2])
+    t3 = Config.configure_task("t3", nothing, [dn_2], [dn_3])
+    t2_bis = Config.configure_task("t2bis", nothing, [dn_1], [])
+    sc_conf = Config.configure_scenario("scenario", [t1, t2, t3, t2_bis])
+    return taipy.create_scenario(sc_conf)
+
+
+def test_submit_task_development_mode():
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now()
+    with freezegun.freeze_time(submit_time):
+        job = orchestrator.submit_task(scenario.t1)  # t1 is executed directly in development mode
+
+    # task output should have been written
+    assert scenario.dn_1.last_edit_date == submit_time
+
+    # job exists and is correct
+    assert job.task == scenario.t1
+    assert not job.force
+    assert job.is_completed()
+    assert job.submit_entity_id == scenario.t1.id
+    assert job.creation_date == submit_time
+    assert job.stacktrace == []
+    assert len(job._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+
+    # submission is created and correct
+    all_submissions = _SubmissionManagerFactory._build_manager()._get_all()
+    assert len(all_submissions) == 1
+    assert all_submissions[0].creation_date == submit_time
+    assert all_submissions[0].submission_status == SubmissionStatus.COMPLETED
+    assert all_submissions[0].jobs == [job]
+    assert all_submissions[0].entity_id == scenario.t1.id
+    assert all_submissions[0].entity_type == "TASK"
+    assert all_submissions[0].entity_config_id == "t1"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 0
+    assert orchestrator.jobs_to_run.qsize() == 0
+
+
+def test_submit_task_development_mode_blocked_job():
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now()
+    with freezegun.freeze_time(submit_time):
+        job = orchestrator.submit_task(scenario.t2)  # t2 is submitted but blocked: its input dn_1 has no data
+
+    # task output should be locked for editing, not written
+    assert scenario.dn_2.edit_in_progress
+
+    # job exists and is correct
+    assert job.task == scenario.t2
+    assert not job.force
+    assert job.is_blocked()  # input data is not ready
+    assert job.submit_entity_id == scenario.t2.id
+    assert job.creation_date == submit_time
+    assert len(job._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job.stacktrace == []
+
+    # submission is created and correct
+    submission = _SubmissionManagerFactory._build_manager()._get(job.submit_id)
+    assert submission.submission_status == SubmissionStatus.BLOCKED
+    assert submission.creation_date == submit_time
+    assert submission.jobs == [job]
+    assert submission.entity_id == scenario.t2.id
+    assert submission.entity_type == "TASK"
+    assert submission.entity_config_id == "t2"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 1
+    assert orchestrator.jobs_to_run.qsize() == 0
+
+
+@pytest.mark.standalone
+def test_submit_task_standalone_mode():
+    Config.configure_job_executions(mode="standalone")
+    sc = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now()
+    with freezegun.freeze_time(submit_time):
+        job = orchestrator.submit_task(sc.t1)  # No dispatcher running. t1 is not executed in standalone mode.
+
+    # task output should NOT have been written
+    assert sc.dn_1.last_edit_date is None
+
+    # task output should be locked for editing
+    assert sc.dn_1.edit_in_progress
+
+    # job exists and is correct
+    assert job.creation_date == submit_time
+    assert job.task == sc.t1
+    assert not job.force
+    assert job.is_pending()
+    assert job.submit_entity_id == sc.t1.id
+    assert len(job._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job.stacktrace == []
+
+    # submission is created and correct
+    submission = _SubmissionManagerFactory._build_manager()._get(job.submit_id)
+    assert submission.creation_date == submit_time
+    assert submission.submission_status == SubmissionStatus.PENDING
+    assert submission.jobs == [job]
+    assert submission.entity_id == sc.t1.id
+    assert submission.entity_type == "TASK"
+    assert submission.entity_config_id == "t1"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 0
+    assert orchestrator.jobs_to_run.qsize() == 1
+
+
+@pytest.mark.standalone
+def test_submit_task_standalone_mode_blocked_job():
+    Config.configure_job_executions(mode="standalone")
+    sc = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    submit_time = datetime.now()
+    with freezegun.freeze_time(submit_time):
+        job = orchestrator.submit_task(sc.t2)  # No dispatcher running. t2 is not executed in standalone mode.
+
+    # task output should NOT have been written
+    assert sc.dn_2.last_edit_date is None
+
+    # task output should be locked for editing
+    assert sc.dn_2.edit_in_progress
+
+    # job exists and is correct
+    assert job.creation_date == submit_time
+    assert job.task == sc.t2
+    assert not job.force
+    assert job.is_blocked()  # input data is not ready
+    assert job.stacktrace == []
+    assert len(job._subscribers) == 2  # submission._update_submission_status and orchestrator._on_status_change
+    assert job.submit_entity_id == sc.t2.id
+
+    # submission is created and correct
+    submission = _SubmissionManagerFactory._build_manager()._get(job.submit_id)
+    assert submission.creation_date == submit_time
+    assert submission.submission_status == SubmissionStatus.BLOCKED
+    assert submission.jobs == [job]
+    assert submission.entity_id == sc.t2.id
+    assert submission.entity_type == "TASK"
+    assert submission.entity_config_id == "t2"
+
+    # orchestrator state is correct
+    assert len(orchestrator.blocked_jobs) == 1
+    assert orchestrator.jobs_to_run.qsize() == 0
+
+
+@pytest.mark.standalone
+def test_submit_task_with_callbacks_and_force_and_wait():
+    Config.configure_job_executions(mode="standalone")
+    scenario = create_scenario()
+    orchestrator = _OrchestratorFactory._build_orchestrator()
+
+    with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator._wait_until_job_finished") as mck:
+        job = orchestrator.submit_task(scenario.t1, callbacks=[nothing], force=True, wait=True, timeout=2)
+
+        # job exists and is correct
+        assert job.task == scenario.t1
+        assert job.force
+        assert len(job._subscribers) == 3  # nothing, _update_submission_status, and _on_status_change
+        mck.assert_called_once_with(job, timeout=2)
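
The development/standalone pairs above isolate the key difference between the two execution modes: in development mode the job runs synchronously inside submit_task, so outputs are already written when the call returns, while in standalone mode the job is merely queued and its outputs stay locked until a dispatcher picks it up. A toy illustration of that contrast (the submit helper below is hypothetical, not the taipy API):

    from queue import SimpleQueue


    def submit(task: str, mode: str, jobs_to_run: SimpleQueue) -> dict:
        # Development mode runs the job inline at submit time;
        # standalone mode only queues it for a dispatcher thread.
        job = {"task": task, "status": "PENDING"}
        if mode == "development":
            job["status"] = "COMPLETED"  # executed inline: outputs written immediately
        else:
            jobs_to_run.put(job)  # outputs stay locked until a dispatcher runs the job
        return job


    q: SimpleQueue = SimpleQueue()
    assert submit("t1", "development", q)["status"] == "COMPLETED" and q.qsize() == 0
    assert submit("t1", "standalone", q)["status"] == "PENDING" and q.qsize() == 1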

+ 89 - 39
tests/core/_orchestrator/test_orchestrator_factory.py

@@ -12,11 +12,13 @@
 from unittest import mock
 
 import pytest
+
 from taipy.config import Config
-from taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _JobDispatcher, _StandaloneJobDispatcher
+from taipy.core._orchestrator._dispatcher import _DevelopmentJobDispatcher, _StandaloneJobDispatcher
 from taipy.core._orchestrator._orchestrator import _Orchestrator
 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
 from taipy.core.config.job_config import JobConfig
+from taipy.core.exceptions import ModeNotAvailable
 from taipy.core.exceptions.exceptions import OrchestratorNotBuilt
 
 
@@ -24,62 +26,110 @@ def test_build_orchestrator():
     _OrchestratorFactory._orchestrator = None
     _OrchestratorFactory._dispatcher = None
 
-    assert _OrchestratorFactory._orchestrator is None
-    assert _OrchestratorFactory._dispatcher is None
+    with mock.patch("taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher") as bd:
+        with mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator.initialize") as initialize:
+            orchestrator = _OrchestratorFactory._build_orchestrator()
+            assert orchestrator == _Orchestrator
+            assert _OrchestratorFactory._orchestrator == _Orchestrator
+            initialize.assert_called_once()
+            bd.assert_not_called()
 
-    orchestrator = _OrchestratorFactory._build_orchestrator()
-    assert orchestrator == _Orchestrator
-    assert _OrchestratorFactory._orchestrator == _Orchestrator
-    dispatcher = _OrchestratorFactory._build_dispatcher()
-    assert isinstance(dispatcher, _JobDispatcher)
-    assert isinstance(_OrchestratorFactory._dispatcher, _JobDispatcher)
 
+def test_build_dispatcher_no_orchestrator():
     _OrchestratorFactory._orchestrator = None
-    assert _OrchestratorFactory._orchestrator is None
-    assert _OrchestratorFactory._dispatcher is not None
+    _OrchestratorFactory._dispatcher = None
+    with pytest.raises(OrchestratorNotBuilt):
+        _OrchestratorFactory._build_dispatcher()
+        assert _OrchestratorFactory._dispatcher is None
 
-    with mock.patch(
-        "taipy.core._orchestrator._orchestrator_factory._OrchestratorFactory._build_dispatcher"
-    ) as build_dispatcher, mock.patch("taipy.core._orchestrator._orchestrator._Orchestrator.initialize") as initialize:
-        orchestrator = _OrchestratorFactory._build_orchestrator()
-        assert orchestrator == _Orchestrator
-        assert _OrchestratorFactory._orchestrator == _Orchestrator
-        build_dispatcher.assert_not_called()
-        initialize.assert_called_once()
+
+def test_build_dispatcher_default():
+    _OrchestratorFactory._orchestrator = None
+    _OrchestratorFactory._dispatcher = None
+    _OrchestratorFactory._build_orchestrator()
+    _OrchestratorFactory._build_dispatcher()
+    assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)
 
 
 def test_build_development_dispatcher():
     Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
     _OrchestratorFactory._orchestrator = None
     _OrchestratorFactory._dispatcher = None
-
-    assert _OrchestratorFactory._orchestrator is None
-    assert _OrchestratorFactory._dispatcher is None
-
-    with pytest.raises(OrchestratorNotBuilt):
-        _OrchestratorFactory._build_dispatcher()
-
     _OrchestratorFactory._build_orchestrator()
-    assert _OrchestratorFactory._orchestrator is not None
-    assert _OrchestratorFactory._dispatcher is None
-
     _OrchestratorFactory._build_dispatcher()
     assert isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)
 
 
+@pytest.mark.standalone
 def test_build_standalone_dispatcher():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    _OrchestratorFactory._orchestrator = None
+    _OrchestratorFactory._dispatcher = None
+    _OrchestratorFactory._build_orchestrator()
     _OrchestratorFactory._build_dispatcher()
     assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
-    assert not isinstance(_OrchestratorFactory._dispatcher, _DevelopmentJobDispatcher)
     assert _OrchestratorFactory._dispatcher.is_running()
-    assert _OrchestratorFactory._dispatcher._nb_available_workers == 2
-    _OrchestratorFactory._dispatcher._nb_available_workers = 1
+    _OrchestratorFactory._dispatcher.stop()
 
-    _OrchestratorFactory._build_dispatcher(force_restart=False)
-    assert _OrchestratorFactory._dispatcher.is_running()
-    assert _OrchestratorFactory._dispatcher._nb_available_workers == 1
 
-    _OrchestratorFactory._build_dispatcher(force_restart=True)
-    assert _OrchestratorFactory._dispatcher.is_running()
-    assert _OrchestratorFactory._dispatcher._nb_available_workers == 2
+@pytest.mark.standalone
+def test_rebuild_standalone_dispatcher_and_force_restart():
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    _OrchestratorFactory._build_orchestrator()
+
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher.start") as start_mock:
+        with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher.stop") as stop_mock:
+            _OrchestratorFactory._build_dispatcher()
+            assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
+            start_mock.assert_called_once()
+            stop_mock.assert_not_called()
+
+    with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher.start") as start_mock:
+        with mock.patch("taipy.core._orchestrator._dispatcher._job_dispatcher._JobDispatcher.stop") as stop_mock:
+            _OrchestratorFactory._build_dispatcher()  # Default force_restart=False
+            assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
+            stop_mock.assert_not_called()
+            start_mock.assert_not_called()
+
+            _OrchestratorFactory._build_dispatcher(force_restart=False)
+            assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
+            stop_mock.assert_not_called()
+            start_mock.assert_not_called()
+
+            _OrchestratorFactory._build_dispatcher(force_restart=True)
+            assert isinstance(_OrchestratorFactory._dispatcher, _StandaloneJobDispatcher)
+            stop_mock.assert_called_once()
+            start_mock.assert_called_once()
+    _OrchestratorFactory._dispatcher.stop()
+
+
+def test_build_unknown_dispatcher():
+    Config.configure_job_executions(mode="UNKNOWN")
+    _OrchestratorFactory._build_orchestrator()
+    with pytest.raises(ModeNotAvailable):
+        _OrchestratorFactory._build_dispatcher()
+        assert _OrchestratorFactory._dispatcher is None
+
+
+def test_remove_dispatcher_not_built():
+    _OrchestratorFactory._dispatcher = None
+    _OrchestratorFactory._remove_dispatcher()
+    assert _OrchestratorFactory._dispatcher is None
+
+
+def test_remove_dispatcher_development():
+    _OrchestratorFactory._build_orchestrator()
+    _OrchestratorFactory._build_dispatcher()
+    assert _OrchestratorFactory._dispatcher is not None
+    _OrchestratorFactory._remove_dispatcher()
+    assert _OrchestratorFactory._dispatcher is None
+
+
+@pytest.mark.standalone
+def test_remove_dispatcher_standalone():
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
+    _OrchestratorFactory._build_orchestrator()
+    _OrchestratorFactory._build_dispatcher()
+    assert _OrchestratorFactory._dispatcher is not None
+    _OrchestratorFactory._remove_dispatcher()
+    assert _OrchestratorFactory._dispatcher is None

+ 9 - 8
tests/core/cycle/test_cycle_manager.py

@@ -192,16 +192,17 @@ def test_get_cycle_start_date_and_end_date():
 def test_hard_delete_shared_entities():
     Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
 
-    dn_config_1 = Config.configure_data_node("my_input_1", "in_memory", scope=Scope.SCENARIO, default_data="testing")
-    dn_config_2 = Config.configure_data_node("my_input_2", "in_memory", scope=Scope.SCENARIO, default_data="testing")
-    dn_config_3 = Config.configure_data_node("my_input_3", "in_memory", scope=Scope.CYCLE, default_data="testing")
-    dn_config_4 = Config.configure_data_node("my_input_4", "in_memory", scope=Scope.GLOBAL, default_data="testing")
+    dn_config_1 = Config.configure_data_node("my_input_1", "pickle", scope=Scope.SCENARIO, default_data="testing")
+    dn_config_2 = Config.configure_data_node("my_input_2", "pickle", scope=Scope.SCENARIO, default_data="testing")
+    dn_config_3 = Config.configure_data_node("my_input_3", "pickle", scope=Scope.CYCLE, default_data="testing")
+    dn_config_4 = Config.configure_data_node("my_input_4", "pickle", scope=Scope.GLOBAL, default_data="testing")
     task_config_1 = Config.configure_task("task_config_1", print, dn_config_1, dn_config_2)
     task_config_2 = Config.configure_task("task_config_2", print, dn_config_2, dn_config_3)
     task_config_3 = Config.configure_task("task_config_3", print, dn_config_3, dn_config_4)  # scope = global
     creation_date = datetime.now()
+    # Daily frequency, so a cycle is attached to the scenarios
     scenario_config_1 = Config.configure_scenario(
-        "scenario_config_1",
+        "scenario_1",
         [task_config_1, task_config_2, task_config_3],
         creation_date=creation_date,
         frequency=Frequency.DAILY,
@@ -213,9 +214,9 @@ def test_hard_delete_shared_entities():
             "sequence_3": [task_config_3],
             "sequence_3": [task_config_3],
         }
         }
     )
     )
-    scenario_config_2 = Config.configure_scenario(
-        "scenario_config_2", [task_config_2, task_config_3]
-    )  # No Frequency so cycle attached to scenarios
+
+    # No Frequency so no cycle attached to scenarios
+    scenario_config_2 = Config.configure_scenario("scenario_config_2", [task_config_2, task_config_3])
     scenario_config_2.add_sequences({"sequence_3": [task_config_3]})
     scenario_config_2.add_sequences({"sequence_3": [task_config_3]})
 
 
     _OrchestratorFactory._build_dispatcher()
     _OrchestratorFactory._build_dispatcher()

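The hunk above swaps the shared data nodes from in_memory to pickle storage and clarifies when a cycle is attached. A minimal sketch of the configuration pattern under test, using the same import paths as the surrounding files (identifiers are illustrative):

    from taipy.config.common.frequency import Frequency
    from taipy.config.common.scope import Scope
    from taipy.config.config import Config

    inp = Config.configure_data_node("inp", "pickle", scope=Scope.SCENARIO, default_data="testing")
    out = Config.configure_data_node("out", "pickle", scope=Scope.CYCLE)
    task = Config.configure_task("t", print, inp, out)
    # A DAILY frequency attaches each scenario created from this config to a cycle;
    # omitting the frequency leaves scenarios without a cycle.
    with_cycle = Config.configure_scenario("with_cycle", [task], frequency=Frequency.DAILY)
    without_cycle = Config.configure_scenario("without_cycle", [task])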
+ 28 - 10
tests/core/data/test_csv_data_node.py

@@ -111,7 +111,7 @@ class TestCSVDataNode:
         dn = CSVDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
         dn = CSVDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
         assert os.path.exists(dn.path) is exists
         assert os.path.exists(dn.path) is exists
 
 
-    def test_read_with_header(self):
+    def test_read_with_header_pandas(self):
         not_existing_csv = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "WRONG.csv", "has_header": True})
         not_existing_csv = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "WRONG.csv", "has_header": True})
         with pytest.raises(NoData):
         with pytest.raises(NoData):
             assert not_existing_csv.read() is None
             assert not_existing_csv.read() is None
@@ -125,6 +125,9 @@ class TestCSVDataNode:
         assert len(data_pandas) == 10
         assert len(data_pandas) == 10
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy())
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy())
 
 
+    @pytest.mark.modin
+    def test_read_with_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create CSVDataNode with modin exposed_type
         # Create CSVDataNode with modin exposed_type
         csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
         csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
         data_modin = csv_data_node_as_modin.read()
         data_modin = csv_data_node_as_modin.read()
@@ -132,6 +135,8 @@ class TestCSVDataNode:
         assert len(data_modin) == 10
         assert len(data_modin) == 10
         assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy())
         assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy())
 
 
+    def test_read_with_header_numpy(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create CSVDataNode with numpy exposed_type
         # Create CSVDataNode with numpy exposed_type
         csv_data_node_as_numpy = CSVDataNode(
         csv_data_node_as_numpy = CSVDataNode(
             "bar", Scope.SCENARIO, properties={"path": path, "has_header": True, "exposed_type": "numpy"}
             "bar", Scope.SCENARIO, properties={"path": path, "has_header": True, "exposed_type": "numpy"}
@@ -141,6 +146,12 @@ class TestCSVDataNode:
         assert len(data_numpy) == 10
         assert len(data_numpy) == 10
         assert np.array_equal(data_numpy, pd.read_csv(path).to_numpy())
         assert np.array_equal(data_numpy, pd.read_csv(path).to_numpy())
 
 
+    def test_read_with_header_custom_exposed_type(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
+        csv_data_node_as_pandas = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path})
+        data_pandas = csv_data_node_as_pandas.read()
+
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
         # Create the same CSVDataNode but with custom exposed_type
         # Create the same CSVDataNode but with custom exposed_type
         csv_data_node_as_custom_object = CSVDataNode(
         csv_data_node_as_custom_object = CSVDataNode(
             "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": MyCustomObject}
             "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": MyCustomObject}
@@ -169,15 +180,6 @@ class TestCSVDataNode:
         assert len(data_pandas) == 11
         assert len(data_pandas) == 11
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path, header=None).to_numpy())
         assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path, header=None).to_numpy())
 
 
-        # Create CSVDataNode with modin exposed_type
-        csv_data_node_as_modin = CSVDataNode(
-            "baz", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
-        )
-        data_modin = csv_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 11
-        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy())
-
         # Create CSVDataNode with numpy exposed_type
         # Create CSVDataNode with numpy exposed_type
         csv_data_node_as_numpy = CSVDataNode(
         csv_data_node_as_numpy = CSVDataNode(
             "qux", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "numpy"}
             "qux", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "numpy"}
@@ -201,6 +203,18 @@ class TestCSVDataNode:
             assert str(row_pandas[1]) == row_custom.integer
             assert str(row_pandas[1]) == row_custom.integer
             assert row_pandas[2] == row_custom.text
             assert row_pandas[2] == row_custom.text
 
 
+    @pytest.mark.modin
+    def test_read_without_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
+        # Create CSVDataNode with modin exposed_type
+        csv_data_node_as_modin = CSVDataNode(
+            "baz", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
+        )
+        data_modin = csv_data_node_as_modin.read()
+        assert isinstance(data_modin, modin_pd.DataFrame)
+        assert len(data_modin) == 11
+        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy())
+
     @pytest.mark.parametrize(
     @pytest.mark.parametrize(
         "content",
         "content",
         [
         [
@@ -219,6 +233,7 @@ class TestCSVDataNode:
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
         )
         )
 
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
     @pytest.mark.parametrize(
         "content",
         "content",
         [
         [
@@ -279,6 +294,7 @@ class TestCSVDataNode:
         with pytest.raises(UnicodeError):
         with pytest.raises(UnicodeError):
             utf8_dn.read()
             utf8_dn.read()
 
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
     @pytest.mark.parametrize(
         "content,columns",
         "content,columns",
         [
         [
@@ -302,6 +318,7 @@ class TestCSVDataNode:
         csv_dn.write(None)
         csv_dn.write(None)
         assert len(csv_dn.read()) == 0
         assert len(csv_dn.read()) == 0
 
 
+    @pytest.mark.modin
     def test_write_modin_with_different_encoding(self, csv_file):
     def test_write_modin_with_different_encoding(self, csv_file):
         data = pd.DataFrame([{"≥a": 1, "b": 2}])
         data = pd.DataFrame([{"≥a": 1, "b": 2}])
 
 
@@ -394,6 +411,7 @@ class TestCSVDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, csv_file):
     def test_filter_modin_exposed_type(self, csv_file):
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
         dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
         dn.write(
         dn.write(

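Splitting the modin cases into dedicated tests lets CI select or skip them with pytest's -m option. A sketch of how the markers used throughout this diff could be registered; the registration may equally live in pytest.ini, and the descriptions are assumptions:

    # conftest.py
    def pytest_configure(config):
        config.addinivalue_line("markers", "modin: tests exercising the modin exposed_type")
        config.addinivalue_line("markers", "standalone: tests running the standalone job dispatcher")
        config.addinivalue_line("markers", "orchestrator_dispatcher: tests of the job dispatchers")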
+ 94 - 71
tests/core/data/test_excel_data_node.py

@@ -146,16 +146,6 @@ class TestExcelDataNode:
         assert len(data_pandas) == 5
         assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path).to_numpy())
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": "Sheet1", "exposed_type": "modin"}
-        )
-
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 5
-        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy())
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "numpy", "sheet_name": "Sheet1"}
@@ -191,6 +181,20 @@ class TestExcelDataNode:
             assert row_pandas["integer"] == row_custom.integer
             assert row_pandas["text"] == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_with_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": "Sheet1", "exposed_type": "modin"}
+        )
+
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, modin_pd.DataFrame)
+        assert len(data_modin) == 5
+        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path).to_numpy())
+
     def test_read_without_header(self):
         not_existing_excel = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": "WRONG.xlsx", "has_header": False}
@@ -210,17 +214,6 @@ class TestExcelDataNode:
         assert len(data_pandas) == 6
         assert np.array_equal(data_pandas.to_numpy(), pd.read_excel(path, header=None).to_numpy())
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": "Sheet1", "exposed_type": "modin"},
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, modin_pd.DataFrame)
-        assert len(data_modin) == 6
-        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy())
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar",
@@ -263,6 +256,20 @@ class TestExcelDataNode:
             assert row_pandas[1] == row_custom.integer
             assert row_pandas[2] == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_without_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar",
+            Scope.SCENARIO,
+            properties={"path": path, "has_header": False, "sheet_name": "Sheet1", "exposed_type": "modin"},
+        )
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, modin_pd.DataFrame)
+        assert len(data_modin) == 6
+        assert np.array_equal(data_modin.to_numpy(), pd.read_excel(path, header=None).to_numpy())
+
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -398,6 +405,7 @@ class TestExcelDataNode:
         else:
             assert len(excel_dn.read()) == 1
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -460,32 +468,6 @@ class TestExcelDataNode:
             assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame)
             assert data_pandas[key].equals(data_pandas_no_sheet_name[key])
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": sheet_names, "exposed_type": "modin"}
-        )
-
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, Dict)
-        assert len(data_modin) == 2
-        assert all(
-            len(data_modin[sheet_name] == 5) and isinstance(data_modin[sheet_name], modin_pd.DataFrame)
-            for sheet_name in sheet_names
-        )
-        assert list(data_modin.keys()) == sheet_names
-        for sheet_name in sheet_names:
-            assert data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name))
-
-        excel_data_node_as_pandas_no_sheet_name = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"}
-        )
-
-        data_modin_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read()
-        assert isinstance(data_modin_no_sheet_name, Dict)
-        for key in data_modin_no_sheet_name.keys():
-            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
-            assert data_modin[key].equals(data_modin_no_sheet_name[key])
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar",
@@ -636,6 +618,36 @@ class TestExcelDataNode:
                 assert row_custom_no_sheet_name.integer == row_custom.integer
                 assert row_custom_no_sheet_name.text == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_multi_sheet_with_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        sheet_names = ["Sheet1", "Sheet2"]
+
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "sheet_name": sheet_names, "exposed_type": "modin"}
+        )
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, Dict)
+        assert len(data_modin) == 2
+        assert all(
+            len(data_modin[sheet_name] == 5) and isinstance(data_modin[sheet_name], modin_pd.DataFrame)
+            for sheet_name in sheet_names
+        )
+        assert list(data_modin.keys()) == sheet_names
+        for sheet_name in sheet_names:
+            assert data_modin[sheet_name].equals(modin_pd.read_excel(path, sheet_name=sheet_name))
+
+        excel_data_node_as_pandas_no_sheet_name = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"}
+        )
+
+        data_modin_no_sheet_name = excel_data_node_as_pandas_no_sheet_name.read()
+        assert isinstance(data_modin_no_sheet_name, Dict)
+        for key in data_modin_no_sheet_name.keys():
+            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
+            assert data_modin[key].equals(data_modin_no_sheet_name[key])
+
     def test_read_multi_sheet_without_header(self):
         not_existing_excel = ExcelDataNode(
             "foo",
@@ -671,30 +683,6 @@ class TestExcelDataNode:
             assert isinstance(data_pandas_no_sheet_name[key], pd.DataFrame)
             assert data_pandas[key].equals(data_pandas_no_sheet_name[key])
 
-        # Create ExcelDataNode with modin exposed_type
-        excel_data_node_as_modin = ExcelDataNode(
-            "bar",
-            Scope.SCENARIO,
-            properties={"path": path, "has_header": False, "sheet_name": sheet_names, "exposed_type": "modin"},
-        )
-        data_modin = excel_data_node_as_modin.read()
-        assert isinstance(data_modin, Dict)
-        assert len(data_modin) == 2
-        assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names)
-        assert list(data_modin.keys()) == sheet_names
-        for sheet_name in sheet_names:
-            assert isinstance(data_modin[sheet_name], modin_pd.DataFrame)
-            assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name))
-
-        excel_data_node_as_modin_no_sheet_name = ExcelDataNode(
-            "bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
-        )
-        data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read()
-        assert isinstance(data_modin_no_sheet_name, Dict)
-        for key in data_modin_no_sheet_name.keys():
-            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
-            assert data_modin[key].equals(data_modin_no_sheet_name[key])
-
         # Create ExcelDataNode with numpy exposed_type
         excel_data_node_as_numpy = ExcelDataNode(
             "bar",
@@ -864,6 +852,34 @@ class TestExcelDataNode:
                 assert row_custom_no_sheet_name.integer == row_custom.integer
                 assert row_custom_no_sheet_name.text == row_custom.text
 
+    @pytest.mark.modin
+    def test_read_multi_sheet_without_header_modin(self):
+        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.xlsx")
+        sheet_names = ["Sheet1", "Sheet2"]
+        # Create ExcelDataNode with modin exposed_type
+        excel_data_node_as_modin = ExcelDataNode(
+            "bar",
+            Scope.SCENARIO,
+            properties={"path": path, "has_header": False, "sheet_name": sheet_names, "exposed_type": "modin"},
+        )
+        data_modin = excel_data_node_as_modin.read()
+        assert isinstance(data_modin, Dict)
+        assert len(data_modin) == 2
+        assert all(len(data_modin[sheet_name]) == 6 for sheet_name in sheet_names)
+        assert list(data_modin.keys()) == sheet_names
+        for sheet_name in sheet_names:
+            assert isinstance(data_modin[sheet_name], modin_pd.DataFrame)
+            assert data_modin[sheet_name].equals(pd.read_excel(path, header=None, sheet_name=sheet_name))
+
+        excel_data_node_as_modin_no_sheet_name = ExcelDataNode(
+            "bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
+        )
+        data_modin_no_sheet_name = excel_data_node_as_modin_no_sheet_name.read()
+        assert isinstance(data_modin_no_sheet_name, Dict)
+        for key in data_modin_no_sheet_name.keys():
+            assert isinstance(data_modin_no_sheet_name[key], modin_pd.DataFrame)
+            assert data_modin[key].equals(data_modin_no_sheet_name[key])
+
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -908,6 +924,7 @@ class TestExcelDataNode:
         read_data = excel_dn.read()
         assert all(np.array_equal(data[sheet_name], read_data[sheet_name]) for sheet_name in sheet_names)
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content,columns",
         [
@@ -1041,6 +1058,7 @@ class TestExcelDataNode:
         )
         assert_frame_equal(dn.read()["Sheet2"], default_multi_sheet_data_frame["Sheet2"])
 
+    @pytest.mark.modin
    @pytest.mark.parametrize(
         "content",
         [
@@ -1063,6 +1081,7 @@ class TestExcelDataNode:
             ).reset_index(drop=True),
         )
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content",
         [
@@ -1083,6 +1102,7 @@ class TestExcelDataNode:
             ),
         )
 
+    @pytest.mark.modin
     @pytest.mark.parametrize(
         "content",
         [
@@ -1259,6 +1279,7 @@ class TestExcelDataNode:
         assert dn["sheet_1"][:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
         assert dn["sheet_2"][:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 3.0}, {"foo": 1.0, "bar": 4.0}]))
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type_with_sheetname(self, excel_file):
         dn = ExcelDataNode(
             "foo", Scope.SCENARIO, properties={"path": excel_file, "sheet_name": "Sheet1", "exposed_type": "modin"}
@@ -1310,6 +1331,7 @@ class TestExcelDataNode:
         df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
         df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type_without_sheetname(self, excel_file):
         dn = ExcelDataNode("foo", Scope.SCENARIO, properties={"path": excel_file, "exposed_type": "modin"})
         dn.write(
@@ -1331,6 +1353,7 @@ class TestExcelDataNode:
         assert dn["Sheet1"]["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
         assert dn["Sheet1"][:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type_multisheet(self, excel_file):
         dn = ExcelDataNode(
             "foo",

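The extracted modin tests rely on the same read contract as their pandas counterparts: a single sheet_name returns one frame, a list of sheet names returns a dict keyed by sheet. A short sketch of that contract (the module path taipy.core.data.excel is an assumption about the package layout):

    from taipy.config.common.scope import Scope
    from taipy.core.data.excel import ExcelDataNode

    single = ExcelDataNode("ex", Scope.SCENARIO, properties={"path": "example.xlsx", "sheet_name": "Sheet1"})
    multi = ExcelDataNode("ex", Scope.SCENARIO, properties={"path": "example.xlsx", "sheet_name": ["Sheet1", "Sheet2"]})
    # single.read() -> one DataFrame; multi.read() -> {"Sheet1": DataFrame, "Sheet2": DataFrame}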
+ 30 - 10
tests/core/data/test_parquet_data_node.py

@@ -155,6 +155,19 @@ class TestParquetDataNode:
         assert data_pandas.equals(df)
         assert np.array_equal(data_pandas.to_numpy(), df.to_numpy())
 
+        # Create ParquetDataNode with numpy exposed_type
+        parquet_data_node_as_numpy = ParquetDataNode(
+            "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "numpy", "engine": engine}
+        )
+        data_numpy = parquet_data_node_as_numpy.read()
+        assert isinstance(data_numpy, np.ndarray)
+        assert len(data_numpy) == 2
+        assert np.array_equal(data_numpy, df.to_numpy())
+
+    @pytest.mark.modin
+    @pytest.mark.parametrize("engine", __engine)
+    def test_read_file_modin(self, engine, parquet_file_path):
+        df = pd.read_parquet(parquet_file_path)
         # Create ParquetDataNode with modin exposed_type
         parquet_data_node_as_modin = ParquetDataNode(
             "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin", "engine": engine}
@@ -165,15 +178,6 @@ class TestParquetDataNode:
         assert data_modin.equals(df)
         assert np.array_equal(data_modin.to_numpy(), df.to_numpy())
 
-        # Create ParquetDataNode with numpy exposed_type
-        parquet_data_node_as_numpy = ParquetDataNode(
-            "bar", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "numpy", "engine": engine}
-        )
-        data_numpy = parquet_data_node_as_numpy.read()
-        assert isinstance(data_numpy, np.ndarray)
-        assert len(data_numpy) == 2
-        assert np.array_equal(data_numpy, df.to_numpy())
-
     @pytest.mark.parametrize("engine", __engine)
     def test_read_folder(self, engine):
         parquet_folder_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/parquet_example")
@@ -317,6 +321,7 @@ class TestParquetDataNode:
             pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
         )
 
+    @pytest.mark.modin
     @pytest.mark.skipif(not util.find_spec("fastparquet"), reason="Append parquet requires fastparquet to be installed")
     @pytest.mark.parametrize(
         "content",
@@ -342,7 +347,6 @@ class TestParquetDataNode:
         [
             [{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}],
             pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
-            modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
         ],
     )
     def test_write_to_disk(self, tmpdir_factory, data):
@@ -353,6 +357,21 @@ class TestParquetDataNode:
         assert pathlib.Path(temp_file_path).exists()
         assert isinstance(dn.read(), pd.DataFrame)
 
+    @pytest.mark.modin
+    @pytest.mark.parametrize(
+        "data",
+        [
+            modin_pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
+        ],
+    )
+    def test_write_to_disk_modin(self, tmpdir_factory, data):
+        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
+        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path})
+        dn.write(data)
+
+        assert pathlib.Path(temp_file_path).exists()
+        assert isinstance(dn.read(), pd.DataFrame)
+
     def test_filter_pandas_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "pandas"})
         dn.write(
@@ -402,6 +436,7 @@ class TestParquetDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, parquet_file_path):
         dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": parquet_file_path, "exposed_type": "modin"})
         dn.write(

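The append test above stays guarded on fastparquet being importable. The same optional-dependency guard works for any extra, as in this minimal sketch:

    from importlib import util

    import pytest

    @pytest.mark.skipif(not util.find_spec("fastparquet"), reason="requires fastparquet")
    def test_append_needs_fastparquet():
        ...  # body elided; runs only when the optional engine is installed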
+ 2 - 1
tests/core/data/test_pickle_data_node.py

@@ -124,9 +124,10 @@ class TestPickleDataNodeEntity:
         assert isinstance(pickle_dict.read(), dict)
         assert pickle_dict.read() == {"bar": 12, "baz": "qux", "quux": [13]}
 
+    @pytest.mark.modin
+    def test_read_and_write_modin(self):
         default_pandas = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
         new_pandas_df = pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
-
         default_modin = modin_pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
         new_modin_df = modin_pd.DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
 

+ 2 - 0
tests/core/data/test_sql_data_node.py

@@ -337,6 +337,7 @@ class TestSQLDataNode:
         dn.append(append_data_1)
         assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True))
 
+    @pytest.mark.modin
     def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {
@@ -430,6 +431,7 @@ class TestSQLDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {

+ 2 - 0
tests/core/data/test_sql_table_data_node.py

@@ -464,6 +464,7 @@ class TestSQLTableDataNode:
         dn.append(append_data_1)
         assert_frame_equal(dn.read(), pd.concat([original_data, append_data_1]).reset_index(drop=True))
 
+    @pytest.mark.modin
     def test_sqlite_append_modin(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {
@@ -539,6 +540,7 @@ class TestSQLTableDataNode:
         assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
         assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
 
+    @pytest.mark.modin
     def test_filter_modin_exposed_type(self, tmp_sqlite_sqlite3_file_path):
         folder_path, db_name, file_extension = tmp_sqlite_sqlite3_file_path
         properties = {

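Both SQL test files build their data nodes from the tmp_sqlite_sqlite3_file_path fixture. A sketch of the sqlite property set those tests assemble (keys follow taipy's sqlite support; the concrete values are illustrative):

    properties = {
        "db_engine": "sqlite",
        "table_name": "example",
        "db_name": "test_db",  # file name without the extension
        "sqlite_folder_path": "/tmp/data",
        "sqlite_file_extension": ".sqlite3",
        "exposed_type": "modin",  # the flavor now gated behind @pytest.mark.modin
    }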
+ 2 - 1
tests/core/job/test_job.py

@@ -16,9 +16,10 @@ from unittest import mock
 from unittest.mock import MagicMock
 
 import pytest
+
 from taipy.config.common.scope import Scope
 from taipy.config.config import Config
-from taipy.core import JobId, Sequence, SequenceId, TaskId
+from taipy.core import JobId, TaskId
 from taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher
 from taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher
 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory

+ 1 - 1
tests/core/notification/test_events_published.py

@@ -64,7 +64,7 @@ class RecordingConsumer(CoreEventConsumerBase):
         return snapshot
 
     def process_event(self, event: Event):
-        # Nothing todo
+        # Nothing to do
         pass
 
     def start(self):

+ 1 - 0
tests/core/scenario/test_scenario_manager.py

@@ -1216,6 +1216,7 @@ def test_scenarios_comparison_development_mode():
         _ScenarioManager._compare(scenario_1, scenario_2, data_node_config_id="abc")
 
 
+@pytest.mark.standalone
 def test_scenarios_comparison_standalone_mode():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
 

+ 1 - 0
tests/core/test_core.py

@@ -109,6 +109,7 @@ class TestCore:
                 Config.configure_data_node(id="i1")
                 Config.configure_data_node(id="i1")
             core.stop()
             core.stop()
 
 
+    @pytest.mark.standalone
     def test_block_config_update_when_core_service_is_running_standalone_mode(self):
     def test_block_config_update_when_core_service_is_running_standalone_mode(self):
         _OrchestratorFactory._dispatcher = None
         _OrchestratorFactory._dispatcher = None
 
 

+ 21 - 23
tests/core/test_core_cli.py

@@ -16,7 +16,7 @@ import pytest
 from taipy.config.common.frequency import Frequency
 from taipy.config.common.scope import Scope
 from taipy.config.config import Config
-from taipy.core import Core
+from taipy.core import Core, taipy
 from taipy.core._version._version_manager import _VersionManager
 from taipy.core._version._version_manager_factory import _VersionManagerFactory
 from taipy.core.common._utils import _load_fct
@@ -107,7 +107,7 @@ def test_dev_mode_clean_all_entities_of_the_latest_version():
         core = Core()
         core.run()
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
     # Initial assertion
@@ -123,7 +123,7 @@ def test_dev_mode_clean_all_entities_of_the_latest_version():
         core = Core()
         core.run()
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
     # Assert number of entities in 2nd version
@@ -131,9 +131,8 @@ def test_dev_mode_clean_all_entities_of_the_latest_version():
     assert len(_TaskManager._get_all(version_number="all")) == 2
     assert len(_SequenceManager._get_all(version_number="all")) == 2
     assert len(_ScenarioManager._get_all(version_number="all")) == 2
-    assert (
-        len(_CycleManager._get_all(version_number="all")) == 1
-    )  # No new cycle is created since old dev version use the same cycle
+    # No new cycle is created since old dev version use the same cycle
+    assert len(_CycleManager._get_all(version_number="all")) == 1
     assert len(_JobManager._get_all(version_number="all")) == 2
 
     # Run development mode again
@@ -151,7 +150,7 @@ def test_dev_mode_clean_all_entities_of_the_latest_version():
 
         # Submit new dev version
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
         # Assert number of entities with 1 dev version and 1 exp version
@@ -206,7 +205,7 @@ def test_dev_mode_clean_all_entities_when_config_is_alternated():
         core = Core()
         core.run()
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
     # Delete the twice_doppelganger function
@@ -220,7 +219,7 @@ def test_dev_mode_clean_all_entities_when_config_is_alternated():
         core = Core()
         core.run()
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
 
@@ -309,7 +308,7 @@ def test_production_mode_load_all_entities_from_previous_production_version():
         core = Core()
         core.run()
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
     with patch("sys.argv", ["prog", "--production", "1.0"]):
@@ -322,7 +321,7 @@ def test_production_mode_load_all_entities_from_previous_production_version():
         assert len(_VersionManager._get_all()) == 2
 
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
 
         assert len(_DataManager._get_all()) == 2
         assert len(_TaskManager._get_all()) == 1
@@ -341,7 +340,7 @@ def test_production_mode_load_all_entities_from_previous_production_version():
 
         # All entities from previous production version should be saved
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
 
         assert len(_DataManager._get_all()) == 4
         assert len(_TaskManager._get_all()) == 2
@@ -364,7 +363,7 @@ def test_force_override_experiment_version():
         assert len(_VersionManager._get_all()) == 2  # 2 version include 1 experiment 1 development
 
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
 
         assert len(_DataManager._get_all()) == 2
         assert len(_TaskManager._get_all()) == 1
@@ -393,7 +392,7 @@ def test_force_override_experiment_version():
 
         # All entities from previous submit should be saved
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
 
         assert len(_DataManager._get_all()) == 4
         assert len(_TaskManager._get_all()) == 2
@@ -418,7 +417,7 @@ def test_force_override_production_version():
         assert len(_VersionManager._get_all()) == 2  # 2 version include 1 production 1 development
 
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
 
         assert len(_DataManager._get_all()) == 2
         assert len(_TaskManager._get_all()) == 1
@@ -447,7 +446,7 @@ def test_force_override_production_version():
 
         # All entities from previous submit should be saved
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
 
         assert len(_DataManager._get_all()) == 4
         assert len(_TaskManager._get_all()) == 2
@@ -458,6 +457,7 @@ def test_force_override_production_version():
         core.stop()
 
 
+@pytest.mark.standalone
 def test_modify_job_configuration_dont_stop_application(caplog, init_config):
     scenario_config = config_scenario()
 
@@ -466,20 +466,17 @@ def test_modify_job_configuration_dont_stop_application(caplog, init_config):
         Config.configure_job_executions(mode="development")
         core.run(force_restart=True)
         scenario = _ScenarioManager._create(scenario_config)
-        jobs = _ScenarioManager._submit(scenario)
+        jobs = taipy.submit(scenario)
         assert all([job.is_finished() for job in jobs])
         core.stop()
-
     init_config()
     scenario_config = config_scenario()
-
     with patch("sys.argv", ["prog", "--experiment", "1.0"]):
         core = Core()
         Config.configure_job_executions(mode="standalone", max_nb_of_workers=5)
         core.run(force_restart=True)
         scenario = _ScenarioManager._create(scenario_config)
-
-        jobs = _ScenarioManager._submit(scenario)
+        jobs = taipy.submit(scenario)
         assert_true_after_time(lambda: all(job.is_finished() for job in jobs))
         error_message = str(caplog.text)
         assert 'JOB "mode" was modified' in error_message
@@ -487,6 +484,7 @@ def test_modify_job_configuration_dont_stop_application(caplog, init_config):
         core.stop()
 
 
+@pytest.mark.standalone
 def test_modify_config_properties_without_force(caplog, init_config):
     scenario_config = config_scenario()
 
@@ -494,7 +492,7 @@ def test_modify_config_properties_without_force(caplog, init_config):
         core = Core()
         core.run()
         scenario = _ScenarioManager._create(scenario_config)
-        _ScenarioManager._submit(scenario)
+        taipy.submit(scenario)
         core.stop()
 
     init_config()
@@ -506,7 +504,7 @@ def test_modify_config_properties_without_force(caplog, init_config):
             core = Core()
             core.run()
             scenario = _ScenarioManager._create(scenario_config_2)
-            _ScenarioManager._submit(scenario)
+            taipy.submit(scenario)
     core.stop()
     error_message = str(caplog.text)
 

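Throughout this file, the private _ScenarioManager._submit call gives way to the public entry point. A sketch of the public flow the updated tests now follow (scenario creation is left abstract):

    from taipy.core import Core, taipy

    core = Core()
    core.run()
    # scenario = ...                  # created from a scenario config, as in the tests above
    # jobs = taipy.submit(scenario)   # returns the submitted jobs
    # assert all(job.is_finished() for job in jobs)
    core.stop()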
+ 6 - 0
tests/core/version/test_production_version_migration.py

@@ -12,6 +12,8 @@
 import multiprocessing
 from unittest.mock import patch
 
+import pytest
+
 from taipy.config.config import Config
 from taipy.core import Core, taipy
 from taipy.core.data._data_manager import _DataManager
@@ -56,6 +58,7 @@ def test_migrate_datanode(init_config):
     assert v1.d1.path == "bar.pkl"
 
 
+@pytest.mark.standalone
 def test_migrate_datanode_in_standalone_mode(init_config):
     scenario_v1 = submit_v1()
 
@@ -88,6 +91,7 @@ def test_migrate_task(init_config):
     assert v1.my_task.skippable is True
 
 
+@pytest.mark.standalone
 def test_migrate_task_in_standalone_mode(init_config):
     scenario_v1 = submit_v1()
 
@@ -120,6 +124,7 @@ def test_migrate_scenario(init_config):
     assert v1.properties["foo"] == "bar"
 
 
+@pytest.mark.standalone
 def test_migrate_scenario_in_standalone_mode(init_config):
     scenario_v1 = submit_v1()
 
@@ -159,6 +164,7 @@ def test_migrate_all_entities(init_config):
     assert v1.properties["foo"] == "bar"
 
 
+@pytest.mark.standalone
 def test_migrate_all_entities_in_standalone_mode(init_config):
     scenario_v1 = submit_v1()
 

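The newly marked standalone tests exercise entity migration across production versions. A sketch of how a migration function is registered with Config.add_migration_function (the target version, config id, and function body are illustrative):

    from taipy.config.config import Config

    def migrate_d1_path(entity):
        entity.path = "bar.pkl"  # illustrative change applied when migrating to the target version
        return entity

    Config.add_migration_function("2.0", "d1", migrate_d1_path)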
+ 1 - 1
tools/packages/taipy-core/setup.requirements.txt

@@ -1,7 +1,7 @@
 pyarrow>=10.0.1,<11.0
 pyarrow>=10.0.1,<11.0
 networkx>=2.6,<3.0
 networkx>=2.6,<3.0
 openpyxl>=3.1.2,<3.2
 openpyxl>=3.1.2,<3.2
-modin[dask]>=0.23.0,<1.0
+modin[dask]>=0.23.1,<1.0
 pymongo[srv]>=4.2.0,<5.0
 pymongo[srv]>=4.2.0,<5.0
 sqlalchemy>=2.0.16,<2.1
 sqlalchemy>=2.0.16,<2.1
 toml>=0.10,<0.11
 toml>=0.10,<0.11