@@ -14,11 +14,13 @@ import random
 import string
 from functools import partial
 from time import sleep
+from typing import cast

 import pytest

 from taipy.config import Config
 from taipy.config.common.scope import Scope
+from taipy.core._orchestrator._dispatcher import _StandaloneJobDispatcher
 from taipy.core._orchestrator._orchestrator import _Orchestrator
 from taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
 from taipy.core.config.job_config import JobConfig
@@ -57,7 +59,7 @@ def test_submit_task_multithreading_multiple_task():
     task_1 = _create_task(partial(lock_multiply, lock_1))
     task_2 = _create_task(partial(lock_multiply, lock_2))

-    _OrchestratorFactory._build_dispatcher(force_restart=True)
+    dispatcher = cast(_StandaloneJobDispatcher, _OrchestratorFactory._build_dispatcher(force_restart=True))

     with lock_1:
         with lock_2:
@@ -70,7 +72,7 @@ def test_submit_task_multithreading_multiple_task():
             assert task_2.output[f"{task_2.config_id}_output0"].read() == 0
             assert_true_after_time(job_1.is_running)
             assert_true_after_time(job_2.is_running)
-            assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2
+            assert dispatcher._nb_available_workers == 0
             assert_submission_status(submission_1, SubmissionStatus.RUNNING)
             assert_submission_status(submission_2, SubmissionStatus.RUNNING)

@@ -78,14 +80,14 @@ def test_submit_task_multithreading_multiple_task():
         assert_true_after_time(job_1.is_running)
         assert task_2.output[f"{task_2.config_id}_output0"].read() == 42
         assert task_1.output[f"{task_1.config_id}_output0"].read() == 0
-        assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1
+        assert dispatcher._nb_available_workers == 1
         assert_submission_status(submission_1, SubmissionStatus.RUNNING)
         assert_submission_status(submission_2, SubmissionStatus.COMPLETED)

     assert_true_after_time(job_1.is_completed)
     assert job_2.is_completed()
     assert task_1.output[f"{task_1.config_id}_output0"].read() == 42
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
+    assert dispatcher._nb_available_workers == 2
     assert_submission_status(submission_1, SubmissionStatus.COMPLETED)
     assert submission_2.submission_status == SubmissionStatus.COMPLETED

@@ -93,17 +95,13 @@ def test_submit_task_multithreading_multiple_task():
 @pytest.mark.orchestrator_dispatcher
 def test_submit_submittable_multithreading_multiple_task():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
     m = multiprocessing.Manager()
     lock_1 = m.Lock()
     lock_2 = m.Lock()
-
     task_1 = _create_task(partial(lock_multiply, lock_1))
     task_2 = _create_task(partial(lock_multiply, lock_2))
-
-    scenario = Scenario("scenario_config", [task_1, task_2], {})
-
-    _OrchestratorFactory._build_dispatcher(force_restart=True)
+    scenario = Scenario("scenario_config", {task_1, task_2}, {})
+    dispatcher = cast(_StandaloneJobDispatcher, _OrchestratorFactory._build_dispatcher(force_restart=True))

     with lock_1:
         with lock_2:
@@ -116,20 +114,20 @@ def test_submit_submittable_multithreading_multiple_task():
             assert task_2.output[f"{task_2.config_id}_output0"].read() == 0
             assert_true_after_time(job_1.is_running)
             assert_true_after_time(job_2.is_running)
-            assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2  # Two processes used
+            assert dispatcher._nb_available_workers == 0  # Two processes used
             assert_submission_status(submission, SubmissionStatus.RUNNING)
         assert_true_after_time(job_2.is_completed)
         assert_true_after_time(job_1.is_running)
         assert task_2.output[f"{task_2.config_id}_output0"].read() == 42
         assert task_1.output[f"{task_1.config_id}_output0"].read() == 0
-        assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1  # job 1 is completed: One process used
         assert_submission_status(submission, SubmissionStatus.RUNNING)
+        assert dispatcher._nb_available_workers == 1  # job 1 is completed: One process used

     assert_true_after_time(job_1.is_completed)
     assert job_2.is_completed()
     assert task_1.output[f"{task_1.config_id}_output0"].read() == 42
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0  # No more process used.
     assert_submission_status(submission, SubmissionStatus.COMPLETED)
+    assert dispatcher._nb_available_workers == 2  # No more process used.


 @pytest.mark.orchestrator_dispatcher
@@ -142,13 +140,13 @@ def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_statu
     task_0 = _create_task(partial(lock_multiply, lock_0))
     task_1 = _create_task(partial(lock_multiply, lock_1))
     task_2 = _create_task(partial(lock_multiply, lock_2))
-    _OrchestratorFactory._build_dispatcher(force_restart=True)
+    dispatcher = cast(_StandaloneJobDispatcher, _OrchestratorFactory._build_dispatcher(force_restart=True))

     with lock_0:
         submission_0 = _Orchestrator.submit_task(task_0)
         job_0 = submission_0._jobs[0]
         assert_true_after_time(job_0.is_running)
-        assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1
+        assert dispatcher._nb_available_workers == 1
         assert_submission_status(submission_0, SubmissionStatus.RUNNING)
         with lock_1:
             with lock_2:
@@ -164,12 +162,12 @@ def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_statu
                 assert_submission_status(submission_0, SubmissionStatus.RUNNING)
                 assert_submission_status(submission_1, SubmissionStatus.PENDING)
                 assert_submission_status(submission_2, SubmissionStatus.RUNNING)
-                assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2
+                assert dispatcher._nb_available_workers == 0

             assert_true_after_time(job_0.is_running)
             assert_true_after_time(job_1.is_running)
             assert_true_after_time(job_2.is_completed)
-            assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 2
+            assert dispatcher._nb_available_workers == 0
             assert task_2.output[f"{task_2.config_id}_output0"].read() == 42
             assert task_1.output[f"{task_1.config_id}_output0"].read() == 0
             assert_submission_status(submission_0, SubmissionStatus.RUNNING)
@@ -179,7 +177,7 @@ def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_statu
         assert_true_after_time(job_0.is_running)
         assert_true_after_time(job_1.is_completed)
         assert job_2.is_completed()
-        assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1
+        assert dispatcher._nb_available_workers == 1
         assert task_1.output[f"{task_1.config_id}_output0"].read() == 42
         assert task_0.output[f"{task_0.config_id}_output0"].read() == 0
         assert_submission_status(submission_0, SubmissionStatus.RUNNING)
@@ -189,7 +187,7 @@ def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_statu
     assert_true_after_time(job_0.is_completed)
     assert job_1.is_completed()
     assert job_2.is_completed()
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0
+    assert dispatcher._nb_available_workers == 2
     assert task_0.output[f"{task_0.config_id}_output0"].read() == 42
     assert _SubmissionManager._get(job_0.submit_id).submission_status == SubmissionStatus.COMPLETED
     assert _SubmissionManager._get(job_1.submit_id).submission_status == SubmissionStatus.COMPLETED
@@ -198,17 +196,15 @@ def test_submit_task_multithreading_multiple_task_in_sync_way_to_check_job_statu

 @pytest.mark.orchestrator_dispatcher
 def test_blocked_task():
-    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
+    Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=4)
     m = multiprocessing.Manager()
     lock_1 = m.Lock()
     lock_2 = m.Lock()
-
     foo_cfg = Config.configure_data_node("foo", default_data=1)
     bar_cfg = Config.configure_data_node("bar")
     baz_cfg = Config.configure_data_node("baz")

-    _OrchestratorFactory._build_dispatcher(force_restart=True)
+    dispatcher = cast(_StandaloneJobDispatcher, _OrchestratorFactory._build_dispatcher(force_restart=True))

     dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg])
     foo = dns[foo_cfg]
@@ -216,16 +212,14 @@ def test_blocked_task():
     baz = dns[baz_cfg]
     task_1 = Task("by_2", {}, partial(lock_multiply, lock_1, 2), [foo], [bar])
     task_2 = Task("by_3", {}, partial(lock_multiply, lock_2, 3), [bar], [baz])
-
     assert task_1.foo.is_ready_for_reading  # foo is ready
     assert not task_1.bar.is_ready_for_reading  # But bar is not ready
     assert not task_2.baz.is_ready_for_reading  # neither does baz
-
     assert len(_Orchestrator.blocked_jobs) == 0
     submission_2 = _Orchestrator.submit_task(task_2)
     job_2 = submission_2._jobs[0]  # job 2 is submitted
     assert job_2.is_blocked()  # since bar is not is_valid the job 2 is blocked
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0  # No process used
+    assert dispatcher._nb_available_workers == 4  # No process used
     assert _SubmissionManager._get(job_2.submit_id).submission_status == SubmissionStatus.BLOCKED
     assert len(_Orchestrator.blocked_jobs) == 1  # One job (job 2) is blocked
     with lock_2:
@@ -233,7 +227,7 @@ def test_blocked_task():
             submission_1 = _Orchestrator.submit_task(task_1)
             job_1 = submission_1._jobs[0]  # job 1 is submitted and locked
             assert_true_after_time(job_1.is_running)  # so it is still running
-            assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1  # One process used for job 1
+            assert dispatcher._nb_available_workers == 3  # One process used for job 1
             assert not _DataManager._get(task_1.bar.id).is_ready_for_reading  # And bar still not ready
             assert job_2.is_blocked  # the job_2 remains blocked
             assert_submission_status(submission_1, SubmissionStatus.RUNNING)
@@ -242,14 +236,14 @@ def test_blocked_task():
         assert _DataManager._get(task_1.bar.id).is_ready_for_reading  # bar becomes ready
         assert _DataManager._get(task_1.bar.id).read() == 2  # the data is computed and written
         assert_true_after_time(job_2.is_running)  # And job 2 can start running
-        assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1  # One process used for job 2
+        assert dispatcher._nb_available_workers == 3  # One process used for job 2
         assert len(_Orchestrator.blocked_jobs) == 0
         assert_submission_status(submission_1, SubmissionStatus.COMPLETED)
         assert_submission_status(submission_2, SubmissionStatus.RUNNING)
     assert_true_after_time(job_2.is_completed)  # job 2 unlocked so it can complete
     assert _DataManager._get(task_2.baz.id).is_ready_for_reading  # baz becomes ready
     assert _DataManager._get(task_2.baz.id).read() == 6  # the data is computed and written
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0  # No more process used.
+    assert dispatcher._nb_available_workers == 4  # No more process used.
     assert submission_1.submission_status == SubmissionStatus.COMPLETED
     assert_submission_status(submission_2, SubmissionStatus.COMPLETED)

@@ -257,29 +251,23 @@ def test_blocked_task():
 @pytest.mark.orchestrator_dispatcher
 def test_blocked_submittable():
     Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
-
     m = multiprocessing.Manager()
     lock_1 = m.Lock()
     lock_2 = m.Lock()
-
     foo_cfg = Config.configure_data_node("foo", default_data=1)
     bar_cfg = Config.configure_data_node("bar")
     baz_cfg = Config.configure_data_node("baz")
-
-    _OrchestratorFactory._build_dispatcher(force_restart=True)
-
+    dispatcher = cast(_StandaloneJobDispatcher, _OrchestratorFactory._build_dispatcher(force_restart=True))
     dns = _DataManager._bulk_get_or_create([foo_cfg, bar_cfg, baz_cfg])
     foo = dns[foo_cfg]
     bar = dns[bar_cfg]
     baz = dns[baz_cfg]
     task_1 = Task("by_2", {}, partial(lock_multiply, lock_1, 2), [foo], [bar])
     task_2 = Task("by_3", {}, partial(lock_multiply, lock_2, 3), [bar], [baz])
-    scenario = Scenario("scenario_config", [task_1, task_2], {})
-
+    scenario = Scenario("scenario_config", {task_1, task_2}, {})
     assert task_1.foo.is_ready_for_reading  # foo is ready
     assert not task_1.bar.is_ready_for_reading  # But bar is not ready
     assert not task_2.baz.is_ready_for_reading  # neither does baz
-
     assert len(_Orchestrator.blocked_jobs) == 0
     with lock_2:
         with lock_1:
@@ -287,23 +275,23 @@ def test_blocked_submittable():
             tasks_jobs = {job._task.id: job for job in submission._jobs}
             job_1, job_2 = tasks_jobs[task_1.id], tasks_jobs[task_2.id]
             assert_true_after_time(job_1.is_running)  # job 1 is submitted and locked so it is still running
-            assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1
             assert not _DataManager._get(task_1.bar.id).is_ready_for_reading  # And bar still not ready
             assert job_2.is_blocked  # the job_2 remains blocked
             assert_submission_status(submission, SubmissionStatus.RUNNING)
+            assert dispatcher._nb_available_workers == 1
         assert_true_after_time(job_1.is_completed)  # job1 unlocked and can complete
         assert _DataManager._get(task_1.bar.id).is_ready_for_reading  # bar becomes ready
         assert _DataManager._get(task_1.bar.id).read() == 2  # the data is computed and written
         assert_true_after_time(job_2.is_running)  # And job 2 can start running
-        assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 1  # Still one process
         # currently used since the previous process is not used anymore
         assert len(_Orchestrator.blocked_jobs) == 0
         assert_submission_status(submission, SubmissionStatus.RUNNING)
+        assert dispatcher._nb_available_workers == 1  # Still one process
     assert_true_after_time(job_2.is_completed)  # job 2 unlocked so it can complete
     assert _DataManager._get(task_2.baz.id).is_ready_for_reading  # baz becomes ready
     assert _DataManager._get(task_2.baz.id).read() == 6  # the data is computed and written
-    assert len(_OrchestratorFactory._dispatcher._dispatched_processes) == 0  # No more process used.
     assert_submission_status(submission, SubmissionStatus.COMPLETED)
+    assert dispatcher._nb_available_workers == 2  # No more process used.


 # ################################ UTIL METHODS ##################################
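
For readers unfamiliar with the counter these assertions switch to, here is a minimal illustrative sketch of the bookkeeping they observe. It assumes a dispatcher that starts with max_nb_of_workers free slots, takes one slot per dispatched job, and releases it when the job finishes; ToyStandaloneDispatcher is a hypothetical stand-in used only for illustration, not taipy's _StandaloneJobDispatcher.

from concurrent.futures import ThreadPoolExecutor


class ToyStandaloneDispatcher:
    """Hypothetical model of the worker counter; not taipy's implementation."""

    def __init__(self, max_nb_of_workers: int) -> None:
        # All slots are free before anything is dispatched.
        self._nb_available_workers = max_nb_of_workers
        self._executor = ThreadPoolExecutor(max_workers=max_nb_of_workers)

    def dispatch(self, fn, *args):
        # A slot is consumed as soon as a job is handed to a worker...
        self._nb_available_workers -= 1
        future = self._executor.submit(fn, *args)
        # ...and released when the job finishes, so the counter climbs back
        # to max_nb_of_workers once every job has completed.
        future.add_done_callback(lambda _f: self._release())
        return future

    def _release(self) -> None:
        self._nb_available_workers += 1

Under that model, with max_nb_of_workers=2 and two lock-held tasks dispatched, the counter reads 0 while both jobs run, 1 after the first completes, and 2 at the end, which is the progression the updated assertions check in place of the removed _dispatched_processes length.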