
Merge branch 'develop' into feature/clean_test_orchetrator

jrobinAV 1 year ago
parent
commit
7e434dfb46

+ 7 - 0
.editorconfig

@@ -11,6 +11,13 @@ insert_final_newline = true
 charset = utf-8
 end_of_line = lf
 
+[*.yml]
+indent_style = space
+indent_size = 2
+charset = utf-8
+end_of_line = lf
+
+
 [*.bat]
 indent_style = tab
 end_of_line = crlf

+ 47 - 0
.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,47 @@
+---
+name: Bug report
+about: Bug reports help improve the product quality.
+title: BUG-
+labels: "\U0001F4A5Malfunction"
+assignees: ''
+
+---
+
+**Description**
+A complete and clear description of the problem.
+
+**How to reproduce**
+
+- A code fragment
+    ```
+    from taipy import ...
+    ```
+
+- And/or configuration files or code:
+    ```
+    from taipy import Config
+
+
+    Config.configure_data_node(...)
+    ...
+    ```
+
+- And/or Taipy GUI Markdown or HTML files
+
+**Expected behavior**
+Description of what would be the expected outcome.
+
+**Screenshots**
+When available and relevant, screenshots to better highlight the problem.
+
+**Runtime environment**
+Please specify the relevant details.
+ - Taipy version (or branch name):
+ - OS: [e.g. Linux, Windows] and version
+ - Browser: [e.g. Chrome, Edge, Safari] and version (if relevant)
+and any other relevant information.
+
+**Acceptance Criteria**
+- [ ] Ensure new code is unit tested, and check code coverage is at least 90%
+- [ ] Create related issue in taipy-doc for documentation and Release Notes if relevant
+

+ 16 - 0
.github/ISSUE_TEMPLATE/feature-improvement.md

@@ -0,0 +1,16 @@
+---
+name: Feature improvement
+about: Feature improvements add extra functionality to an existing feature.
+title: ''
+labels: "\U0001F4C8 Improvement"
+assignees: ''
+
+---
+
+**Description**
+What this improvement addresses (performance, API...).
+
+**Acceptance Criteria**
+- [ ] Ensure new code is unit tested, and check code coverage is at least 90%
+- [ ] Propagate any change to the demos and run all of them to ensure there is no breaking change
+- [ ] Ensure any change is well documented

+ 27 - 0
.github/ISSUE_TEMPLATE/new-feature.md

@@ -0,0 +1,27 @@
+---
+name: New feature
+about: Suggest a new feature for the product.
+title: ''
+labels: "✨New feature"
+assignees: ''
+
+---
+
+**What would that feature address**
+Description of the missing functionality that this issue would address.
+Ex: It is not possible to do this or that...
+
+***Description of the ideal solution***
+What would be the best way to provide that functionality?
+
+***Caveats***
+What impact could that feature have on the rest of the product, and what should be handled with special care?
+
+***Other options***
+What else could we do (workaround, third-party...)?
+
+**Acceptance Criteria**
+- [ ] Ensure new code is unit tested, and check code coverage is at least 90%
+- [ ] Create related issue in taipy-doc for documentation and Release Notes
+- [ ] Check if a new demo could be provided based on this, or if legacy demos could benefit from it
+- [ ] Ensure any change is well documented

+ 0 - 19
.github/sync.yml

@@ -1,19 +0,0 @@
-group:
-  repos: |
-    Avaiga/taipy-config
-    Avaiga/taipy-core
-    Avaiga/taipy-gui
-    Avaiga/taipy-rest
-
-  files:
-    - .flake8
-    - .gitattributes
-    - .license-header
-    - CODE_OF_CONDUCT.md
-    - CONTRIBUTING.md
-    - LICENSE
-    - mypy.ini
-    - pyproject.toml
-    - taipy/__init__.py
-    # - .isort.cfg
-    # - __init__.py

+ 0 - 27
.github/workflows/linter.yml

@@ -1,27 +0,0 @@
-name: Python linter and type checker
-
-on:
-  push:
-    branches: [ develop, dev/*, release/* ]
-  pull_request:
-    branches: [ develop, dev/*, release/* ]
-
-jobs:
-  linter:
-    timeout-minutes: 20
-    strategy:
-      fail-fast: false
-      matrix:
-        language: [ 'python' ]
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - uses: ricardochaves/python-lint@v1.4.0
-        with:
-          use-pylint: false
-          use-isort: false
-          use-mypy: false
-          extra-black-options: "--line-length=120 --diff"
-          extra-pycodestyle-options: "--max-line-length=120  --exclude=tests/gui --ignore=E121,E123,E126,E226,E24,E704,W503,W504,E203"
-          extra-mypy-options: "--ignore-missing-imports --implicit-optional --no-namespace-packages --exclude (taipy/templates/|generate_pyi.py) --follow-imports skip"
-          extra-isort-options: "--line-length=120 --force-grid-wrap=10 --multi-line=VERTICAL_HANGING_INDENT --trailing-comma"

+ 59 - 0
.github/workflows/overall-tests.yml

@@ -0,0 +1,59 @@
+name: Overall Test Workflow
+
+on:
+  pull_request_review:
+    types: [submitted]
+
+jobs:
+  tests:
+    if: github.event.review.state == 'approved'
+    timeout-minutes: 40
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ['3.8', '3.9', '3.10', '3.11']
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{matrix.python-version}}
+
+      - name: Install pipenv
+        run: curl https://raw.githubusercontent.com/pypa/pipenv/master/get-pipenv.py | python
+
+      - name: Install Dependencies
+        run: pipenv install --dev --python=${{ matrix.python-version }}
+
+      - name: Setup LibMagic (MacOS)
+        if: matrix.os == 'macos-latest'
+        run: brew install libmagic
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+      - name: Frontend Bundle Build
+        run: pipenv run python tools/frontend/bundle_build.py
+
+      - name: Install Playwright
+        run: pipenv run playwright install chromium --with-deps
+
+      - name: Pytest
+        run: pipenv run pytest --cov=taipy --cov-append --cov-report="xml:overall-coverage.xml" --cov-report term-missing tests
+
+      - name: Coverage
+        if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
+        uses: MishaKav/pytest-coverage-comment@main
+        with:
+          pytest-xml-coverage-path: ./overall-coverage.xml
+          title: Taipy Overall Coverage Report
+
+      - name: Notify user if failed
+        if: failure() && github.event_name == 'workflow_dispatch'
+        run: |
+          if [[ -n "${{ github.event.inputs.user-to-notify }}" ]]; then
+            curl "${{ secrets.notify_endpoint }}" -d '{"username": "${{ github.event.inputs.user-to-notify }}", "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" }' -H "Content-Type: application/json"
+          fi
+        shell: bash

+ 2 - 2
.github/workflows/packaging.yml

@@ -23,8 +23,8 @@ jobs:
     runs-on: ${{ matrix.os }}

     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-versions }}


+ 112 - 0
.github/workflows/partial-tests.yml

@@ -0,0 +1,112 @@
+name: Partial Tests Workflow
+
+on:
+  push:
+    branches: [ develop, dev/*, release/* ]
+  pull_request:
+    branches: [ develop, dev/*, release/* ]
+
+jobs:
+  linter:
+    runs-on: ubuntu-latest
+    steps:
+      # to be replaced by Ruff in the future
+      - uses: actions/checkout@v4
+      - uses: ricardochaves/python-lint@v1.4.0
+        with:
+          use-pylint: false
+          use-isort: false
+          use-mypy: false
+          extra-black-options: "--line-length=120 --diff"
+          extra-pycodestyle-options: "--max-line-length=120  --exclude=tests/gui --ignore=E121,E123,E126,E226,E24,E704,W503,W504,E203"
+          extra-mypy-options: "--ignore-missing-imports --implicit-optional --no-namespace-packages --exclude (taipy/templates/|generate_pyi.py|tools) --follow-imports skip"
+          extra-isort-options: "--line-length=120 --force-grid-wrap=10 --multi-line=VERTICAL_HANGING_INDENT --trailing-comma"
+  tests:
+    needs: linter
+    timeout-minutes: 40
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ['3.8', '3.9', '3.10', '3.11']
+        os: [ubuntu-latest, windows-latest, macos-latest]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: dorny/paths-filter@v2
+        id: changes
+        with:
+          filters: |
+            cli:
+              - 'taipy/_cli/**'
+            config:
+              - 'taipy/config/**'
+            core:
+              - 'taipy/core/**'
+            gui:
+              - 'taipy/gui/**'
+            gui-core:
+              - 'taipy/gui_core/**'
+            logger:
+              - 'taipy/logger/**'
+            rest:
+              - 'taipy/rest/**'
+            templates:
+              - 'taipy/templates/**'
+
+      - uses: actions/setup-python@v5
+        with:
+          python-version: ${{matrix.python-version}}
+
+      - name: Install pipenv
+        run: curl https://raw.githubusercontent.com/pypa/pipenv/master/get-pipenv.py | python
+
+      - name: Install Dependencies
+        run: pipenv install --dev --python=${{ matrix.python-version }}
+
+      - name: Setup LibMagic (MacOS)
+        if: matrix.os == 'macos-latest'
+        run: brew install libmagic
+
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+      - name: Frontend Bundle Build
+        if: steps.changes.outputs.gui == 'true' || steps.changes.outputs.gui-core == 'true'
+        run: pipenv run python tools/frontend/bundle_build.py
+
+      - name: Install Playwright
+        if: steps.changes.outputs.gui == 'true' || steps.changes.outputs.gui-core == 'true'
+        run: pipenv run playwright install chromium --with-deps
+
+      - name: Pytest CLI
+        if: steps.changes.outputs.cli == 'true'
+        run: pipenv run pytest  tests/cli
+
+      - name: Pytest Config
+        if: steps.changes.outputs.config == 'true'
+        run: pipenv run pytest  tests/config
+
+      - name: Pytest Core
+        if: steps.changes.outputs.core == 'true'
+        run: pipenv run pytest  tests/core
+
+      - name: Pytest GUI
+        if: steps.changes.outputs.gui == 'true'
+        run: pipenv run pytest  tests/gui
+
+      - name: Pytest GUI Core
+        if: steps.changes.outputs.gui-core == 'true'
+        run: pipenv run pytest  tests/gui_core
+
+      - name: Pytest Logger
+        if: steps.changes.outputs.logger == 'true'
+        run: pipenv run pytest  tests/logger
+
+      - name: Pytest Rest
+        if: steps.changes.outputs.rest == 'true'
+        run: pipenv run pytest  tests/rest
+
+      - name: Pytest Templates
+        if: steps.changes.outputs.templates == 'true'
+        run: pipenv run pytest  tests/templates

+ 0 - 17
.github/workflows/sync.yml

@@ -1,17 +0,0 @@
-name: Sync Files
-on:
-  push:
-    branches:
-      - develop
-  workflow_dispatch:
-jobs:
-  sync:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout Repository
-        uses: actions/checkout@develop
-      - name: Run GitHub File Sync
-        uses: BetaHuhn/repo-file-sync-action@v1
-        with:
-          GH_PAT: ${{ secrets.GH_PAT }}
-

+ 0 - 45
.github/workflows/tests.yml

@@ -1,45 +0,0 @@
-name: Python tests
-
-on:
-  push:
-    branches: [ develop, dev/*, release/* ]
-  pull_request:
-    branches: [ develop, dev/*, release/* ]
-
-jobs:
-  backend:
-    timeout-minutes: 40
-    strategy:
-      fail-fast: false
-      matrix:
-        python-versions: ['3.8', '3.9', '3.10', '3.11']
-        os: [ubuntu-latest, windows-latest, macos-latest]
-    runs-on: ${{ matrix.os }}
-
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: ${{ matrix.python-versions }}
-      - uses: actions/setup-node@v4
-        with:
-          node-version: '20'
-
-
-      - name: install libmagic on macos
-        if: matrix.os == 'macos-latest'
-        run: brew install libmagic
-
-      - name: Tests
-        if: matrix.os != 'windows-latest' || matrix.python-versions != '3.8'
-        run: |
-          pip install tox
-          tox -e tests
-
-      - name: Notify user if failed
-        if: failure() && github.event_name == 'workflow_dispatch'
-        run: |
-          if [[ -n "${{ github.event.inputs.user-to-notify }}" ]]; then
-            curl "${{ secrets.notify_endpoint }}" -d '{"username": "${{ github.event.inputs.user-to-notify }}", "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" }' -H "Content-Type: application/json"
-          fi
-        shell: bash

+ 3 - 1
Pipfile

@@ -34,6 +34,7 @@ sqlalchemy = "==2.0.16"
 toml = "==0.10"
 twisted = "==23.8.0"
 tzlocal = "==3.0"
+boto3 = "==1.29.1"

 [dev-packages]
 autopep8 = "*"
@@ -58,7 +59,7 @@ pandas-stubs = "*"
 playwright = "*"
 pre-commit = "*"
 pyopenssl = "*"
-pytest = "*"
+pytest = "==7.4.3"
 pytest-cov = "*"
 pytest-mock = "*"
 pytest-playwright = "*"
@@ -74,6 +75,7 @@ types-python-dateutil = "*"
 types-pytz = "*"
 types-toml = ">=0.10.0"
 types-tzlocal = "*"
+moto = ">=4.2.9"

 [requires]
 python_version = "3"

+ 1 - 0
taipy/core/config/__init__.py

@@ -54,6 +54,7 @@ _inject_section(
         ("configure_pickle_data_node", DataNodeConfig._configure_pickle),
         ("configure_pickle_data_node", DataNodeConfig._configure_pickle),
         ("configure_excel_data_node", DataNodeConfig._configure_excel),
         ("configure_excel_data_node", DataNodeConfig._configure_excel),
         ("configure_generic_data_node", DataNodeConfig._configure_generic),
         ("configure_generic_data_node", DataNodeConfig._configure_generic),
+        ("configure_s3_object_data_node", DataNodeConfig._configure_s3_object),
     ],
 )
 _inject_section(

+ 2 - 1
taipy/core/config/checkers/_data_node_config_checker.py

@@ -44,7 +44,8 @@ class _DataNodeConfigChecker(_ConfigChecker):
                 data_node_config._STORAGE_TYPE_KEY,
                 data_node_config.storage_type,
                 f"`{data_node_config._STORAGE_TYPE_KEY}` field of DataNodeConfig `{data_node_config_id}` must be"
-                f" either csv, sql_table, sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory.",
+                f" either csv, sql_table, sql, mongo_collection, pickle, excel, generic, json, parquet, s3_object,"
+                f" or in_memory.",
             )

     def _check_scope(self, data_node_config_id: str, data_node_config: DataNodeConfig):

+ 43 - 1
taipy/core/config/config.schema.json

@@ -99,7 +99,8 @@
               "in_memory",
               "in_memory",
               "generic",
               "generic",
               "parquet",
               "parquet",
-              ""
+              "s3_object",
+              "",
             ],
             "default": "pickle"
           },
@@ -236,6 +237,30 @@
             "description": "storage_type: parquet specific.Additional parameters when writing parquet files, default is an empty dictionary",
             "description": "storage_type: parquet specific.Additional parameters when writing parquet files, default is an empty dictionary",
             "type": "object"
             "type": "object"
           },
           },
+          "aws_access_key":{
+            "description": "storage_type: s3_object specific.Amazon Storage public key",
+            "type": "string"
+          },
+          "aws_secret_access_key":{
+            "description": "storage_type: s3_object specific.Amazon Storage secret key",
+            "type": "string"
+          },
+          "aws_s3_bucket_name":{
+            "description": "storage_type: s3_object specific.Name of Bucket",
+            "type": "string"
+          },
+          "aws_s3_object_key":{
+            "description": "storage_type: s3_object specific.File name",
+            "type": "string"
+          },
+          "aws_region":{
+            "description": "storage_type: s3_object specific.Bucket Location",
+            "type": "string"
+          },
+          "aws_s3_object_parameters":{
+            "description": "storage_type: s3_object specific.Additional parameters when accessing s3 object, default is an empty dictionary",
+            "type": "array"
+          },
           "if": {
           "if": {
             "properties": {
             "properties": {
               "storage_type": {
               "storage_type": {
@@ -282,6 +307,23 @@
                 "required": [
                 "required": [
                   "db_name"
                   "db_name"
                 ],
                 ],
+            "else": {
+              "if": {
+                "properties": {
+                  "storage_type": {
+                    "enum": [
+                      "s3_object",
+                    ]
+                  }
+                }
+              },
+              "then": {
+                "required": [
+                  "aws_access_key",
+                  "aws_secret_access_key",
+                  "aws_s3_bucket_name",
+                  "aws_s3_object_key"
+                ],
                 "if": {
                 "if": {
                   "properties": {
                   "properties": {
                     "storage_type": {
                     "storage_type": {

+ 78 - 4
taipy/core/config/data_node_config.py

@@ -35,8 +35,8 @@ class DataNodeConfig(Section):
     Attributes:
         id (str): Unique identifier of the data node config. It must be a valid Python variable name.
         storage_type (str): Storage type of the data nodes created from the data node config. The possible values
-            are : "csv", "excel", "pickle", "sql_table", "sql", "mongo_collection", "generic", "json", "parquet" and
-            "in_memory".
+            are : "csv", "excel", "pickle", "sql_table", "sql", "mongo_collection", "generic", "json", "parquet",
+            "in_memory and "s3_object".
             The default value is "pickle".
             The default value is "pickle".
             Note that the "in_memory" value can only be used when `JobConfig^`.mode is "standalone".
             Note that the "in_memory" value can only be used when `JobConfig^`.mode is "standalone".
         scope (Optional[Scope^]): The optional `Scope^` of the data nodes instantiated from the data node config.
         scope (Optional[Scope^]): The optional `Scope^` of the data nodes instantiated from the data node config.
@@ -57,6 +57,8 @@ class DataNodeConfig(Section):
     _STORAGE_TYPE_VALUE_GENERIC = "generic"
     _STORAGE_TYPE_VALUE_GENERIC = "generic"
     _STORAGE_TYPE_VALUE_JSON = "json"
     _STORAGE_TYPE_VALUE_JSON = "json"
     _STORAGE_TYPE_VALUE_PARQUET = "parquet"
     _STORAGE_TYPE_VALUE_PARQUET = "parquet"
+    _STORAGE_TYPE_VALUE_S3_OBJECT = "s3_object"
+
     _DEFAULT_STORAGE_TYPE = _STORAGE_TYPE_VALUE_PICKLE
     _ALL_STORAGE_TYPES = [
         _STORAGE_TYPE_VALUE_PICKLE,
@@ -69,6 +71,7 @@ class DataNodeConfig(Section):
         _STORAGE_TYPE_VALUE_GENERIC,
         _STORAGE_TYPE_VALUE_JSON,
         _STORAGE_TYPE_VALUE_PARQUET,
+        _STORAGE_TYPE_VALUE_S3_OBJECT,
     ]

     _EXPOSED_TYPE_KEY = "exposed_type"
@@ -145,6 +148,13 @@ class DataNodeConfig(Section):
     _OPTIONAL_COMPRESSION_PARQUET_PROPERTY = "compression"
     _OPTIONAL_READ_KWARGS_PARQUET_PROPERTY = "read_kwargs"
     _OPTIONAL_WRITE_KWARGS_PARQUET_PROPERTY = "write_kwargs"
+    # S3object
+    _REQUIRED_AWS_ACCESS_KEY_ID_PROPERTY = "aws_access_key"
+    _REQUIRED_AWS_SECRET_ACCESS_KEY_PROPERTY = "aws_secret_access_key"
+    _REQUIRED_AWS_STORAGE_BUCKET_NAME_PROPERTY = "aws_s3_bucket_name"
+    _REQUIRED_AWS_S3_OBJECT_KEY_PROPERTY = "aws_s3_object_key"
+    _OPTIONAL_AWS_REGION_PROPERTY = "aws_region"
+    _OPTIONAL_AWS_S3_OBJECT_PARAMETERS_PROPERTY = "aws_s3_object_parameters"

     _REQUIRED_PROPERTIES: Dict[str, List] = {
         _STORAGE_TYPE_VALUE_PICKLE: [],
@@ -169,6 +179,12 @@ class DataNodeConfig(Section):
         _STORAGE_TYPE_VALUE_GENERIC: [],
         _STORAGE_TYPE_VALUE_JSON: [],
         _STORAGE_TYPE_VALUE_PARQUET: [],
+        _STORAGE_TYPE_VALUE_S3_OBJECT: [
+            _REQUIRED_AWS_ACCESS_KEY_ID_PROPERTY,
+            _REQUIRED_AWS_SECRET_ACCESS_KEY_PROPERTY,
+            _REQUIRED_AWS_STORAGE_BUCKET_NAME_PROPERTY,
+            _REQUIRED_AWS_S3_OBJECT_KEY_PROPERTY,
+        ],
     }

     _OPTIONAL_PROPERTIES = {
@@ -241,6 +257,10 @@ class DataNodeConfig(Section):
             _OPTIONAL_WRITE_KWARGS_PARQUET_PROPERTY: None,
             _OPTIONAL_EXPOSED_TYPE_PARQUET_PROPERTY: _DEFAULT_EXPOSED_TYPE,
         },
+        _STORAGE_TYPE_VALUE_S3_OBJECT: {
+            _OPTIONAL_AWS_REGION_PROPERTY: None,
+            _OPTIONAL_AWS_S3_OBJECT_PARAMETERS_PROPERTY: None,
+        },
     }

     _SCOPE_KEY = "scope"
@@ -380,8 +400,8 @@ class DataNodeConfig(Section):
         Parameters:
             storage_type (str): The default storage type for all data node configurations.
                 The possible values are *"pickle"* (the default value), *"csv"*, *"excel"*,
-                *"sql"*, *"mongo_collection"*, *"in_memory"*, *"json"*, *"parquet"* or
-                *"generic"*.
+                *"sql"*, *"mongo_collection"*, *"in_memory"*, *"json"*, *"parquet"*, *"generic"*,
+                or *"s3_object"*.
             scope (Optional[Scope^]): The default scope for all data node configurations.<br/>
                 The default value is `Scope.SCENARIO`.
             validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be
@@ -465,6 +485,7 @@ class DataNodeConfig(Section):
             cls._STORAGE_TYPE_VALUE_GENERIC: cls._configure_generic,
             cls._STORAGE_TYPE_VALUE_JSON: cls._configure_json,
             cls._STORAGE_TYPE_VALUE_PARQUET: cls._configure_parquet,
+            cls._STORAGE_TYPE_VALUE_S3_OBJECT: cls._configure_s3_object,
         }

         if storage_type in cls._ALL_STORAGE_TYPES:
@@ -1030,6 +1051,59 @@ class DataNodeConfig(Section):
             id, DataNodeConfig._STORAGE_TYPE_VALUE_MONGO_COLLECTION, scope, validity_period, **properties
         )

+    @classmethod
+    def _configure_s3_object(
+        cls,
+        id: str,
+        aws_access_key: str,
+        aws_secret_access_key: str,
+        aws_s3_bucket_name: str,
+        aws_s3_object_key: str,
+        aws_region: Optional[str] = None,
+        aws_s3_object_parameters: Optional[Dict[str, Any]] = None,
+        scope: Optional[Scope] = None,
+        validity_period: Optional[timedelta] = None,
+        **properties,
+    ) -> "DataNodeConfig":
+        """Configure a new S3 object data node configuration.
+
+        Parameters:
+            id (str): The unique identifier of the new S3 Object data node configuration.
+            aws_access_key (str): Amazon Web Services ID used to identify the account.
+            aws_secret_access_key (str): Amazon Web Services access key to authenticate programmatic requests.
+            aws_s3_bucket_name (str): The bucket in S3 to read from and to write the data to.
+            aws_s3_object_key (str): The key of the object to read from and to write to in the S3 bucket.
+            aws_region (Optional[str]): Self-contained geographic area where Amazon Web Services (AWS)
+                infrastructure is located.
+            aws_s3_object_parameters (Optional[dict[str, any]]): A dictionary of additional arguments to be passed
+                into AWS S3 bucket access string.
+            scope (Optional[Scope^]): The scope of the S3 Object data node configuration.<br/>
+                The default value is `Scope.SCENARIO`.
+            validity_period (Optional[timedelta]): The duration since the last edit date for which the data node can be
+                considered up-to-date. Once the validity period has passed, the data node is considered stale and
+                relevant tasks will run even if they are skippable (see the
+                [Task configs page](../core/config/task-config.md) for more details).
+                If *validity_period* is set to None, the data node is always up-to-date.
+            **properties (dict[str, any]): A keyworded variable length list of additional arguments.
+
+        Returns:
+            The new S3 object data node configuration.
+        """
+        properties.update(
+            {
+                cls._REQUIRED_AWS_ACCESS_KEY_ID_PROPERTY: aws_access_key,
+                cls._REQUIRED_AWS_SECRET_ACCESS_KEY_PROPERTY: aws_secret_access_key,
+                cls._REQUIRED_AWS_STORAGE_BUCKET_NAME_PROPERTY: aws_s3_bucket_name,
+                cls._REQUIRED_AWS_S3_OBJECT_KEY_PROPERTY: aws_s3_object_key,
+            }
+        )
+
+        if aws_region is not None:
+            properties[cls._OPTIONAL_AWS_REGION_PROPERTY] = aws_region
+        if aws_s3_object_parameters is not None:
+            properties[cls._OPTIONAL_AWS_S3_OBJECT_PARAMETERS_PROPERTY] = aws_s3_object_parameters
+
+        return cls.__configure(id, DataNodeConfig._STORAGE_TYPE_VALUE_S3_OBJECT, scope, validity_period, **properties)
+
     @staticmethod
     def __configure(
         id: str,

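For reference, a minimal usage sketch of the new API (the `configure_s3_object_data_node` name is the one injected in `taipy/core/config/__init__.py` above; all credential, bucket, and key values are hypothetical placeholders):

```python
from taipy import Config

# Hypothetical placeholder values; aws_region and aws_s3_object_parameters are optional.
s3_object_cfg = Config.configure_s3_object_data_node(
    id="my_s3_object",
    aws_access_key="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
    aws_s3_bucket_name="your-bucket-name",
    aws_s3_object_key="path/to/object.txt",
    aws_region="us-east-1",
)
```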
+ 1 - 0
taipy/core/data/__init__.py

@@ -21,3 +21,4 @@ from .parquet import ParquetDataNode
 from .pickle import PickleDataNode
 from .sql import SQLDataNode
 from .sql_table import SQLTableDataNode
+from .aws_s3 import S3ObjectDataNode

+ 156 - 0
taipy/core/data/aws_s3.py

@@ -0,0 +1,156 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+from datetime import datetime, timedelta
+from typing import Any, Dict, List, Optional, Set
+
+import boto3
+
+from taipy.config.common.scope import Scope
+
+from .._version._version_manager_factory import _VersionManagerFactory
+from ..exceptions.exceptions import MissingRequiredProperty
+from .data_node import DataNode
+from .data_node_id import DataNodeId, Edit
+
+
+class S3ObjectDataNode(DataNode):
+    """Data Node object stored in an Amazon Web Service S3 Bucket.
+
+    Attributes:
+        config_id (str): Identifier of the data node configuration. It must be a valid Python
+            identifier.
+        scope (Scope^): The scope of this data node.
+        id (str): The unique identifier of this data node.
+        owner_id (str): The identifier of the owner (sequence_id, scenario_id, cycle_id) or
+            None.
+        parent_ids (Optional[Set[str]]): The identifiers of the parent tasks or `None`.
+        last_edit_date (datetime): The date and time of the last modification.
+        edits (List[Edit^]): The ordered list of edits of that data node.
+        version (str): The string indicates the application version of the data node to instantiate. If not provided,
+            the current version is used.
+        validity_period (Optional[timedelta]): The duration implemented as a timedelta since the last edit date for
+            which the data node can be considered up-to-date. Once the validity period has passed, the data node is
+            considered stale and relevant tasks will run even if they are skippable (see the
+            [Task management page](../core/entities/task-mgt.md) for more details).
+            If _validity_period_ is set to `None`, the data node is always up-to-date.
+        edit_in_progress (bool): True if a task computing the data node has been submitted
+            and not completed yet. False otherwise.
+        editor_id (Optional[str]): The identifier of the user who is currently editing the data node.
+        editor_expiration_date (Optional[datetime]): The expiration date of the editor lock.
+        properties (dict[str, Any]): A dictionary of additional properties. Note that the
+            _properties_ parameter must at least contain an entry for _"aws_access_key"_, _"aws_secret_access_key"_,
+            _"aws_s3_bucket_name"_, and _"aws_s3_object_key"_:
+
+            - _"aws_access_key"_ `(str)`: Amazon Web Services ID for to identify account\n
+            - _"aws_secret_access_key"_ `(str)`: Amazon Web Services access key to authenticate programmatic requests.\n
+            - _"aws_region"_ `(Any)`: Self-contained geographic area where Amazon Web Services (AWS) infrastructure is
+                    located.\n
+            - _"aws_s3_bucket_name"_ `(str)`: unique identifier for a container that stores objects in Amazon Simple
+                    Storage Service (S3).\n
+            - _"aws_s3_object_key"_ `(str)`:  unique idntifier for the name of the object(file) that has to be read
+                    or written. \n
+            - _"aws _s3_object_parameters"_ `(str)`: A dictionary of additional arguments to be passed to interact with
+                    the AWS service\n
+    """
+
+    __STORAGE_TYPE = "s3_object"
+
+    __AWS_ACCESS_KEY_ID = "aws_access_key"
+    __AWS_SECRET_ACCESS_KEY = "aws_secret_access_key"
+    __AWS_STORAGE_BUCKET_NAME = "aws_s3_bucket_name"
+    __AWS_S3_OBJECT_KEY = "aws_s3_object_key"
+    __AWS_REGION = "aws_region"
+    __AWS_S3_OBJECT_PARAMETERS = "aws_s3_object_parameters"
+
+    _REQUIRED_PROPERTIES: List[str] = [
+        __AWS_ACCESS_KEY_ID,
+        __AWS_SECRET_ACCESS_KEY,
+        __AWS_STORAGE_BUCKET_NAME,
+        __AWS_S3_OBJECT_KEY,
+    ]
+
+    def __init__(
+        self,
+        config_id: str,
+        scope: Scope,
+        id: Optional[DataNodeId] = None,
+        owner_id: Optional[str] = None,
+        parent_ids: Optional[Set[str]] = None,
+        last_edit_date: Optional[datetime] = None,
+        edits: Optional[List[Edit]] = None,
+        version: Optional[str] = None,
+        validity_period: Optional[timedelta] = None,
+        edit_in_progress: bool = False,
+        editor_id: Optional[str] = None,
+        editor_expiration_date: Optional[datetime] = None,
+        properties: Optional[Dict] = None,
+    ):
+        if properties is None:
+            properties = {}
+        required = self._REQUIRED_PROPERTIES
+        if missing := set(required) - set(properties.keys()):
+            raise MissingRequiredProperty(
+                f"The following properties " f"{', '.join(x for x in missing)} were not informed and are required."
+            )
+        super().__init__(
+            config_id,
+            scope,
+            id,
+            owner_id,
+            parent_ids,
+            last_edit_date,
+            edits,
+            version or _VersionManagerFactory._build_manager()._get_latest_version(),
+            validity_period,
+            edit_in_progress,
+            editor_id,
+            editor_expiration_date,
+            **properties,
+        )
+
+        self._s3_client = boto3.client(
+            "s3",
+            aws_access_key_id=properties.get(self.__AWS_ACCESS_KEY_ID),
+            aws_secret_access_key=properties.get(self.__AWS_SECRET_ACCESS_KEY),
+        )
+
+        if not self._last_edit_date:
+            self._last_edit_date = datetime.now()
+
+        self._TAIPY_PROPERTIES.update(
+            {
+                self.__AWS_ACCESS_KEY_ID,
+                self.__AWS_SECRET_ACCESS_KEY,
+                self.__AWS_STORAGE_BUCKET_NAME,
+                self.__AWS_S3_OBJECT_KEY,
+                self.__AWS_REGION,
+                self.__AWS_S3_OBJECT_PARAMETERS,
+            }
+        )
+
+    @classmethod
+    def storage_type(cls) -> str:
+        return cls.__STORAGE_TYPE
+
+    def _read(self):
+        aws_s3_object = self._s3_client.get_object(
+            Bucket=self.properties[self.__AWS_STORAGE_BUCKET_NAME],
+            Key=self.properties[self.__AWS_S3_OBJECT_KEY],
+        )
+        return aws_s3_object["Body"].read().decode("utf-8")
+
+    def _write(self, data: Any):
+        self._s3_client.put_object(
+            Bucket=self.properties[self.__AWS_STORAGE_BUCKET_NAME],
+            Key=self.properties[self.__AWS_S3_OBJECT_KEY],
+            Body=data,
+        )

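A minimal round-trip sketch for the new data node, assuming `moto` (added to `[dev-packages]` in the Pipfile above) is used to mock S3; the bucket and key names are hypothetical:

```python
import boto3
from moto import mock_s3

from taipy.config.common.scope import Scope
from taipy.core.data.aws_s3 import S3ObjectDataNode


@mock_s3
def test_s3_object_round_trip():
    # Create the mocked bucket the data node will target.
    boto3.client("s3", region_name="us-east-1").create_bucket(Bucket="your-bucket-name")

    node = S3ObjectDataNode(
        "my_s3_object",
        Scope.SCENARIO,
        properties={
            "aws_access_key": "testing",
            "aws_secret_access_key": "testing",
            "aws_s3_bucket_name": "your-bucket-name",
            "aws_s3_object_key": "path/to/object.txt",
        },
    )
    node._write("hello")            # put_object under the hood
    assert node._read() == "hello"  # get_object + UTF-8 decode
```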
+ 1 - 1
taipy/core/taipy.py

@@ -625,7 +625,7 @@ def get_jobs() -> List[Job]:
     return _JobManagerFactory._build_manager()._get_all()


-def delete_job(job: Job, force=False):
+def delete_job(job: Job, force: Optional[bool] = False):
     """Delete a job.
     """Delete a job.
 
 
     This function deletes the specified job. If the job is not completed and
     This function deletes the specified job. If the job is not completed and

+ 3 - 2
taipy/gui/extension/library.py

@@ -39,7 +39,7 @@ class ElementProperty:

     def __init__(
         self,
-        property_type: PropertyType,
+        property_type: t.Union[PropertyType, t.Type[_TaipyBase]],
         default_value: t.Optional[t.Any] = None,
         js_name: t.Optional[str] = None,
     ) -> None:
@@ -54,6 +54,7 @@ class ElementProperty:
                 JavaScript code.
         """
         self.default_value = default_value
+        self.property_type: t.Union[PropertyType, t.Type[_TaipyBase]]
         if property_type == PropertyType.broadcast:
             if isinstance(default_value, str):
                 self.default_value = _get_broadcast_var_name(default_value)
@@ -205,7 +206,7 @@ class Element:
             if default_attr is not None:
                 elt_built.set_value_and_default(
                     var_name=default_name,
-                    var_type=default_attr.property_type,
+                    var_type=t.cast(PropertyType, default_attr.property_type),
                     default_val=default_attr.default_value,
                     with_default=default_attr.property_type != PropertyType.data,
                 )

+ 67 - 108
taipy/gui_core/_context.py

@@ -12,7 +12,6 @@
 import json
 import typing as t
 from collections import defaultdict
-from enum import Enum
 from numbers import Number
 from threading import Lock

@@ -23,8 +22,21 @@ except ImportError:

 import pandas as pd
 from dateutil import parser
+
 from taipy.config import Config
-from taipy.core import Cycle, DataNode, Job, Scenario, Sequence, cancel_job, create_scenario
+from taipy.core import (
+    Cycle,
+    DataNode,
+    DataNodeId,
+    Job,
+    JobId,
+    Scenario,
+    ScenarioId,
+    Sequence,
+    SequenceId,
+    cancel_job,
+    create_scenario,
+)
 from taipy.core import delete as core_delete
 from taipy.core import delete_job
 from taipy.core import get as core_get
@@ -44,6 +56,9 @@ from taipy.core.data._abstract_tabular import _AbstractTabularDataNode
 from taipy.core.notification import CoreEventConsumerBase, EventEntityType
 from taipy.core.notification.event import Event, EventOperation
 from taipy.core.notification.notifier import Notifier
+from taipy.core.submission.submission import Submission
+from taipy.core.submission._submission_manager_factory import _SubmissionManagerFactory
+from taipy.core.submission.submission_status import SubmissionStatus
 from taipy.gui import Gui, State
 from taipy.gui._warnings import _warn
 from taipy.gui.gui import _DoNotUpdate
@@ -51,36 +66,21 @@ from taipy.gui.gui import _DoNotUpdate
 from ._adapters import _EntityType


-class _SubmissionStatus(Enum):
-    SUBMITTED = 0
-    COMPLETED = 1
-    CANCELED = 2
-    FAILED = 3
-    BLOCKED = 4
-    WAITING = 5
-    RUNNING = 6
-    UNDEFINED = 7
-
-
 class _SubmissionDetails:
     def __init__(
         self,
         client_id: str,
         module_context: str,
         callback: t.Callable,
-        entity_id: str,
-        status: _SubmissionStatus,
-        jobs: t.List[Job],
+        submission: Submission,
     ) -> None:
         self.client_id = client_id
         self.module_context = module_context
         self.callback = callback
-        self.entity_id = entity_id
-        self.status = status
-        self.jobs = jobs
+        self.submission = submission
-    def set_status(self, status: _SubmissionStatus):
-        self.status = status
+    def set_submission(self, submission: Submission):
+        self.submission = submission
         return self


@@ -112,7 +112,7 @@ class _GuiCoreContext(CoreEventConsumerBase):
         self.data_nodes_by_owner: t.Optional[t.Dict[t.Optional[str], DataNode]] = None
         self.scenario_configs: t.Optional[t.List[t.Tuple[str, str]]] = None
         self.jobs_list: t.Optional[t.List[Job]] = None
-        self.client_jobs_by_submission: t.Dict[str, _SubmissionDetails] = dict()
+        self.client_submission: t.Dict[str, _SubmissionDetails] = dict()
         # register to taipy core notification
         reg_id, reg_queue = Notifier.register()
         # locks
@@ -125,17 +125,19 @@ class _GuiCoreContext(CoreEventConsumerBase):
     def process_event(self, event: Event):
         if event.entity_type == EventEntityType.SCENARIO:
             if event.operation == EventOperation.SUBMISSION:
-                self.scenario_status_callback(event.attribute_name, True)
+                self.scenario_status_callback(event.attribute_name)
                 return
             self.scenario_refresh(
-                event.entity_id if event.operation != EventOperation.DELETION and is_readable(event.entity_id) else None
+                event.entity_id
+                if event.operation != EventOperation.DELETION and is_readable(t.cast(ScenarioId, event.entity_id))
+                else None
             )
         elif event.entity_type == EventEntityType.SEQUENCE and event.entity_id:
             sequence = None
             try:
                 sequence = (
                     core_get(event.entity_id)
-                    if event.operation != EventOperation.DELETION and is_readable(event.entity_id)
+                    if event.operation != EventOperation.DELETION and is_readable(t.cast(SequenceId, event.entity_id))
                     else None
                 )
                 if sequence and hasattr(sequence, "parent_ids") and sequence.parent_ids:
@@ -147,6 +149,7 @@ class _GuiCoreContext(CoreEventConsumerBase):
         elif event.entity_type == EventEntityType.JOB:
             with self.lock:
                 self.jobs_list = None
+        elif event.entity_type == EventEntityType.SUBMISSION:
             self.scenario_status_callback(event.entity_id)
         elif event.entity_type == EventEntityType.DATA_NODE:
             with self.lock:
@@ -165,50 +168,43 @@ class _GuiCoreContext(CoreEventConsumerBase):
             {"scenario": scenario_id or True},
             {"scenario": scenario_id or True},
         )
         )
 
 
-    def scenario_status_callback(self, job_id: str, is_submission: t.Optional[bool] = False):
-        if not job_id or not (is_submission or is_readable(job_id)):
+    def scenario_status_callback(self, submission_id: t.Optional[str]):
+        if not submission_id or not is_readable_submission(submission_id):
             return
         try:
-            if is_submission:
-                sub_id = job_id
-                job = None
-            else:
-                job = core_get(job_id)
-                if not job:
-                    return
-                sub_id = job.submit_id
-            sub_details = self.client_jobs_by_submission.get(sub_id)
+            sub_details: t.Optional[_SubmissionDetails] = self.client_submission.get(submission_id)
             if not sub_details:
                 return

-            if not sub_details.client_id or not sub_details.entity_id or not sub_details.jobs:
+            submission = core_get_submission(submission_id)
+            if not submission or not submission.entity_id:
                 return

-            entity = core_get(sub_details.entity_id)
+            entity = core_get(submission.entity_id)
             if not entity:
                 return

-            new_status = self._get_submittable_status(sub_details.jobs)
-            if sub_details.status != new_status:
+            new_status = submission.submission_status
+            if sub_details.submission.submission_status != new_status:
                 # callback
                 self.gui._call_user_callback(
                     sub_details.client_id,
                     sub_details.callback,
-                    [entity, {"submission_status": new_status.name, "job": job}],
+                    [entity, {"submission_status": new_status.name}],
                     sub_details.module_context,
                 )
             with self.submissions_lock:
                 if new_status in (
-                    _SubmissionStatus.COMPLETED,
-                    _SubmissionStatus.FAILED,
-                    _SubmissionStatus.CANCELED,
+                    SubmissionStatus.COMPLETED,
+                    SubmissionStatus.FAILED,
+                    SubmissionStatus.CANCELED,
                 ):
-                    self.client_jobs_by_submission.pop(sub_id, None)
+                    self.client_submission.pop(submission_id, None)
                 else:
-                    self.client_jobs_by_submission[sub_id] = sub_details.set_status(new_status)
+                    self.client_submission[submission_id] = sub_details.set_submission(submission)

         except Exception as e:
-            _warn(f"Job ({job_id}) is not available", e)
+            _warn(f"Submission ({submission_id}) is not available", e)

         finally:
             self.gui._broadcast(_GuiCoreContext._CORE_CHANGED_NAME, {"jobs": True})
@@ -264,10 +260,10 @@ class _GuiCoreContext(CoreEventConsumerBase):
         state.assign(_GuiCoreContext._SCENARIO_SELECTOR_ID_VAR, args[0])

     def get_scenario_by_id(self, id: str) -> t.Optional[Scenario]:
-        if not id or not is_readable(id):
+        if not id or not is_readable(t.cast(ScenarioId, id)):
             return None
         try:
-            return core_get(id)
+            return core_get(t.cast(ScenarioId, id))
         except Exception:
             return None

@@ -438,72 +434,26 @@ class _GuiCoreContext(CoreEventConsumerBase):
         if entity:
             try:
                 jobs = core_submit(entity)
+                submission_entity = core_get_submission(jobs[0].submit_id if isinstance(jobs, list) else jobs.submit_id)
                 if submission_cb := data.get("on_submission_change"):
                     submission_fn = self.gui._get_user_function(submission_cb)
                     if callable(submission_fn):
-                        job_ids = [j.id for j in (jobs if isinstance(jobs, list) else [jobs])]
                         client_id = self.gui._get_client_id()
                         module_context = self.gui._get_locals_context()
-                        sub_id = jobs[0].submit_id if isinstance(jobs, list) else jobs.submit_id
                         with self.submissions_lock:
-                            self.client_jobs_by_submission[sub_id] = _SubmissionDetails(
+                            self.client_submission[submission_entity.id] = _SubmissionDetails(
                                 client_id,
                                 module_context,
                                 submission_fn,
-                                entity_id,
-                                _SubmissionStatus.SUBMITTED,
-                                job_ids,
+                                submission_entity,
                             )
                     else:
                         _warn(f"on_submission_change(): '{submission_cb}' is not a valid function.")
-                self.scenario_status_callback(jobs[0].id if len(jobs) else "" if isinstance(jobs, list) else jobs.id)
+                self.scenario_status_callback(submission_entity.id)
                 state.assign(_GuiCoreContext._SCENARIO_VIZ_ERROR_VAR, "")
                 state.assign(_GuiCoreContext._SCENARIO_VIZ_ERROR_VAR, "")
             except Exception as e:
             except Exception as e:
                 state.assign(_GuiCoreContext._SCENARIO_VIZ_ERROR_VAR, f"Error submitting entity. {e}")
                 state.assign(_GuiCoreContext._SCENARIO_VIZ_ERROR_VAR, f"Error submitting entity. {e}")
 
 
-    def _get_submittable_status(self, jobs_ids: t.List[str]) -> _SubmissionStatus:
-        abandoned = False
-        canceled = False
-        blocked = False
-        waiting = False
-        running = False
-        completed = False
-        for id in jobs_ids:
-            job = core_get(id)
-            if not job:
-                continue
-            if job.is_failed():
-                return _SubmissionStatus.FAILED
-            if job.is_canceled():
-                canceled = True
-            if job.is_blocked():
-                blocked = True
-                continue
-            if job.is_pending() or job.is_submitted():
-                waiting = True
-                continue
-            if job.is_running():
-                running = True
-                continue
-            if job.is_completed() or job.is_skipped():
-                completed = True
-                continue
-            if job.is_abandoned():
-                abandoned = True
-        if canceled:
-            return _SubmissionStatus.CANCELED
-        if abandoned:
-            return _SubmissionStatus.UNDEFINED
-        if running:
-            return _SubmissionStatus.RUNNING
-        if waiting:
-            return _SubmissionStatus.WAITING
-        if blocked:
-            return _SubmissionStatus.BLOCKED
-        if completed:
-            return _SubmissionStatus.COMPLETED
-        return _SubmissionStatus.UNDEFINED
-
     def __do_datanodes_tree(self):
         if self.data_nodes_by_owner is None:
             self.data_nodes_by_owner = defaultdict(list)
@@ -690,10 +640,10 @@ class _GuiCoreContext(CoreEventConsumerBase):
                             cycles_scenarios.extend(scenarios)
                         else:
                             cycles_scenarios.append(cycle)
-                elif is_readable(owner_id):
+                elif is_readable(t.cast(ScenarioId, owner_id)):
                     entity = core_get(owner_id)
-                    if entity and (scenarios := self.scenario_by_cycle.get(entity)):
-                        cycles_scenarios.extend(scenarios)
+                    if entity and (scenarios_cycle := self.scenario_by_cycle.get(t.cast(Cycle, entity))):
+                        cycles_scenarios.extend(scenarios_cycle)
                     elif isinstance(entity, Scenario):
                         cycles_scenarios.append(entity)
         return cycles_scenarios
@@ -709,7 +659,7 @@ class _GuiCoreContext(CoreEventConsumerBase):
             res = []
             for e in dn.edits:
                 job_id = e.get("job_id")
-                job: Job = None
+                job: t.Optional[Job] = None
                 if job_id:
                     if not is_readable(job_id):
                         job_id += " not readable"
@@ -758,10 +708,10 @@ class _GuiCoreContext(CoreEventConsumerBase):
         return _DoNotUpdate()

     def __check_readable_editable(self, state: State, id: str, type: str, var: str):
-        if not is_readable(id):
+        if not is_readable(t.cast(DataNodeId, id)):
             state.assign(var, f"{type} {id} is not readable.")
             state.assign(var, f"{type} {id} is not readable.")
             return False
             return False
-        if not is_editable(id):
+        if not is_editable(t.cast(DataNodeId, id)):
             state.assign(var, f"{type} {id} is not editable.")
             state.assign(var, f"{type} {id} is not editable.")
             return False
             return False
         return True
         return True
@@ -832,7 +782,7 @@ class _GuiCoreContext(CoreEventConsumerBase):
             id
             and isinstance(datanode, DataNode)
             and id == datanode.id
-            and is_readable(id)
+            and is_readable(t.cast(DataNodeId, id))
             and (dn := core_get(id))
             and (dn := core_get(id))
             and isinstance(dn, DataNode)
             and isinstance(dn, DataNode)
             and dn.is_ready_for_reading
             and dn.is_ready_for_reading
@@ -848,7 +798,7 @@ class _GuiCoreContext(CoreEventConsumerBase):
             id
             and isinstance(datanode, DataNode)
             and id == datanode.id
-            and is_readable(id)
+            and is_readable(t.cast(DataNodeId, id))
             and (dn := core_get(id))
             and isinstance(dn, DataNode)
             and dn.is_ready_for_reading
@@ -866,7 +816,7 @@ class _GuiCoreContext(CoreEventConsumerBase):
             id
             and isinstance(datanode, DataNode)
             and id == datanode.id
-            and is_readable(id)
+            and is_readable(t.cast(DataNodeId, id))
             and (dn := core_get(id))
             and isinstance(dn, DataNode)
             and dn.is_ready_for_reading
@@ -892,3 +842,12 @@ class _GuiCoreContext(CoreEventConsumerBase):
             state.assign(_GuiCoreContext._DATANODE_VIZ_DATA_ID_VAR, data_id)
         elif chart_id := data.get("chart_id"):
             state.assign(_GuiCoreContext._DATANODE_VIZ_DATA_CHART_ID_VAR, chart_id)
+
+
+# TODO remove when Submission is supported by Core API
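+# These thin wrappers reach into the private _SubmissionManagerFactory because, for now,
+# taipy.core does not expose is_readable()/get() for Submission entities (see TODO above).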
+def is_readable_submission(id: str):
+    return _SubmissionManagerFactory._build_manager()._is_readable(t.cast(Submission, id))
+
+
+def core_get_submission(id: str):
+    return _SubmissionManagerFactory._build_manager()._get(id)

+ 1 - 0
tests/conftest.py

@@ -88,6 +88,7 @@ def inject_core_sections():
                 ("configure_pickle_data_node", DataNodeConfig._configure_pickle),
                 ("configure_pickle_data_node", DataNodeConfig._configure_pickle),
                 ("configure_excel_data_node", DataNodeConfig._configure_excel),
                 ("configure_excel_data_node", DataNodeConfig._configure_excel),
                 ("configure_generic_data_node", DataNodeConfig._configure_generic),
                 ("configure_generic_data_node", DataNodeConfig._configure_generic),
+                ("configure_s3_object_data_node", DataNodeConfig._configure_s3_object),
             ],
             ],
         )
         )
         _inject_section(
         _inject_section(

+ 25 - 1
tests/core/config/checkers/test_data_node_config_checker.py

@@ -88,7 +88,7 @@ class TestDataNodeConfigChecker:
         assert len(Config._collector.errors) == 1
         expected_error_message = (
             "`storage_type` field of DataNodeConfig `new` must be either csv, sql_table,"
-            " sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory."
+            " sql, mongo_collection, pickle, excel, generic, json, parquet, s3_object, or in_memory."
             ' Current value of property `storage_type` is "bar".'
         )
         assert expected_error_message in caplog.text
@@ -170,6 +170,19 @@ class TestDataNodeConfigChecker:
         ]
         assert all(message in caplog.text for message in expected_error_messages)

+        config._sections[DataNodeConfig.name]["new"].storage_type = "s3_object"
+        with pytest.raises(SystemExit):
+            Config._collector = IssueCollector()
+            Config.check()
+        assert len(Config._collector.errors) == 4
+        expected_error_messages = [
+            "DataNodeConfig `new` is missing the required property `aws_access_key` for type `s3_object`.",
+            "DataNodeConfig `new` is missing the required property `aws_secret_access_key` for type `s3_object`.",
+            "DataNodeConfig `new` is missing the required property `aws_s3_bucket_name` for type `s3_object`.",
+            "DataNodeConfig `new` is missing the required property `aws_s3_object_key` for type `s3_object`.",
+        ]
+        assert all(message in caplog.text for message in expected_error_messages)
+
     def test_check_properties_of_sqlite_engine(self, caplog):
         config = Config._applied_config
         Config._compile_configs()
@@ -353,6 +366,17 @@ class TestDataNodeConfigChecker:
         Config.check()
         assert len(Config._collector.errors) == 0

+        config._sections[DataNodeConfig.name]["new"].storage_type = "s3_object"
+        config._sections[DataNodeConfig.name]["new"].properties = {
+            "aws_access_key": "access_key",
+            "aws_secret_access_key": "secret_acces_key",
+            "aws_s3_bucket_name": "s3_bucket_name",
+            "aws_s3_object_key": "s3_object_key",
+        }
+        Config._collector = IssueCollector()
+        Config.check()
+        assert len(Config._collector.errors) == 0
+
         config._sections[DataNodeConfig.name]["new"].storage_type = "excel"
         Config._collector = IssueCollector()
         Config.check()

+ 16 - 0
tests/core/config/test_config.py

@@ -105,3 +105,19 @@ class TestConfig:
         )
         Config.configure_mongo_collection_data_node(a, b, c, d, e, f, g, h, extra_args, scope, vp, property=k)
         assert len(Config.data_nodes) == 2
+
+    def test_configure_s3_object_data_node(self):
+        a, b, c, d, e, f, extra_args, scope, vp, k = (
+            "foo",
+            "access_key",
+            "secret_acces_key",
+            "s3_bucket_name",
+            "s3_object_key",
+            None,
+            {"foo": "bar"},
+            Scope.SCENARIO,
+            timedelta(1),
+            "qux",
+        )
+        Config.configure_s3_object_data_node(a, b, c, d, e, f, extra_args, scope, vp, property=k)
+        assert len(Config.data_nodes) == 2

+ 76 - 0
tests/core/config/test_configure_default_config.py

@@ -41,6 +41,12 @@ def test_set_default_data_node_configuration():
     assert data_node4.scope == Scope.SCENARIO
     assert data_node4.validity_period == timedelta(1)

+    Config.set_default_data_node_configuration("s3_object", validity_period=timedelta(1))
+    data_node5 = Config.configure_data_node(id="input_data5")
+    assert data_node5.storage_type == "s3_object"
+    assert data_node5.scope == Scope.SCENARIO
+    assert data_node5.validity_period == timedelta(1)
+

 def test_set_default_data_node_configuration_replace_old_default_config():
     Config.set_default_data_node_configuration(
@@ -580,3 +586,73 @@ def test_set_default_mongo_collection_data_node_configuration():
     assert dn3.db_extra_args == {"default": "default"}
     assert dn3.scope == Scope.GLOBAL
     assert dn3.validity_period == timedelta(1)
+
+
+def test_set_default_s3_object_data_node_configuration():
+    Config.set_default_data_node_configuration(
+        storage_type="s3_object",
+        aws_access_key="default_access_key",
+        aws_secret_access_key="default_secret_acces_key",
+        aws_s3_bucket_name="default_bucket_name",
+        aws_s3_object_key="default_object_key",
+        aws_region="",
+        aws_s3_object_parameters={"default": "default"},
+        scope=Scope.GLOBAL,
+        validity_period=timedelta(2),
+    )
+
+    # Configuring a data node without a storage_type
+    # should fall back to the default data node configuration
+    dn1 = Config.configure_data_node(id="dn1")
+    assert dn1.storage_type == "s3_object"
+    assert dn1.aws_access_key == "default_access_key"
+    assert dn1.aws_secret_access_key == "default_secret_acces_key"
+    assert dn1.aws_s3_bucket_name == "default_bucket_name"
+    assert dn1.aws_s3_object_key == "default_object_key"
+    assert dn1.aws_region == ""
+    assert dn1.aws_s3_object_parameters == {"default": "default"}
+    assert dn1.scope == Scope.GLOBAL
+    assert dn1.validity_period == timedelta(2)
+
+    # Configuring a data node without a storage_type
+    # but with custom properties should override the matching defaults
+    dn2 = Config.configure_data_node(
+        id="dn2",
+        aws_access_key="custom_access_key_2",
+        aws_secret_access_key="custom_secret_acces_key_2",
+        aws_s3_bucket_name="custom_bucket_name_2",
+        aws_s3_object_key="custom_object_key_2",
+    )
+    assert dn2.storage_type == "s3_object"
+    assert dn2.aws_access_key == "custom_access_key_2"
+    assert dn2.aws_secret_access_key == "custom_secret_acces_key_2"
+    assert dn2.aws_s3_bucket_name == "custom_bucket_name_2"
+    assert dn2.aws_s3_object_key == "custom_object_key_2"
+    assert dn2.aws_region == ""
+    assert dn2.aws_s3_object_parameters == {"default": "default"}
+    assert dn2.scope == Scope.GLOBAL
+    assert dn2.validity_period == timedelta(2)
+
+    # Configuring a data node with an explicit storage_type of "s3_object"
+    # should accept the same properties as the default data node
+    dn3 = Config.configure_data_node(
+        id="dn3",
+        storage_type="s3_object",
+        aws_access_key="custom_access_key_3",
+        aws_secret_access_key="custom_secret_acces_key_3",
+        aws_s3_bucket_name="custom_bucket_name_3",
+        aws_s3_object_key="custom_object_key_3",
+        aws_region="",
+        aws_s3_object_parameters={"default": "default"},
+        scope=Scope.GLOBAL,
+        validity_period=timedelta(1),
+    )
+    assert dn3.storage_type == "s3_object"
+    assert dn3.aws_access_key == "custom_access_key_3"
+    assert dn3.aws_secret_access_key == "custom_secret_acces_key_3"
+    assert dn3.aws_s3_bucket_name == "custom_bucket_name_3"
+    assert dn3.aws_s3_object_key == "custom_object_key_3"
+    assert dn3.aws_region == ""
+    assert dn3.aws_s3_object_parameters == {"default": "default"}
+    assert dn3.scope == Scope.GLOBAL
+    assert dn3.validity_period == timedelta(1)

+ 19 - 2
tests/core/config/test_data_node_config.py

@@ -93,6 +93,23 @@ def test_data_node_config_default_parameter():
     assert mongo_dn_cfg.db_driver == ""
     assert mongo_dn_cfg.validity_period is None

+    aws_s3_object_dn_cfg = Config.configure_data_node(
+        "data_node_11",
+        "s3_object",
+        aws_access_key="test",
+        aws_secret_access_key="test_secret",
+        aws_s3_bucket_name="test_bucket",
+        aws_s3_object_key="test_file.txt",
+    )
+    assert aws_s3_object_dn_cfg.scope == Scope.SCENARIO
+    assert aws_s3_object_dn_cfg.aws_access_key == "test"
+    assert aws_s3_object_dn_cfg.aws_secret_access_key == "test_secret"
+    assert aws_s3_object_dn_cfg.aws_s3_bucket_name == "test_bucket"
+    assert aws_s3_object_dn_cfg.aws_s3_object_key == "test_file.txt"
+    assert aws_s3_object_dn_cfg.aws_region is None
+    assert aws_s3_object_dn_cfg.aws_s3_object_parameters is None
+    assert aws_s3_object_dn_cfg.validity_period is None
+

 def test_data_node_config_check(caplog):
     data_node_config = Config.configure_data_node("data_nodes1", "pickle")
@@ -114,7 +131,7 @@ def test_data_node_config_check(caplog):
         Config.check()
     expected_error_message = (
         "`storage_type` field of DataNodeConfig `data_nodes` must be either csv, sql_table,"
-        " sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory. Current"
+        " sql, mongo_collection, pickle, excel, generic, json, parquet, s3_object, or in_memory. Current"
         ' value of property `storage_type` is "bar".'
     )
     assert expected_error_message in caplog.text
@@ -136,7 +153,7 @@ def test_data_node_config_check(caplog):
         Config.check()
     expected_error_message = (
         "`storage_type` field of DataNodeConfig `data_nodes` must be either csv, sql_table,"
-        " sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory."
+        " sql, mongo_collection, pickle, excel, generic, json, parquet, s3_object, or in_memory."
         ' Current value of property `storage_type` is "bar".'
     )
     assert expected_error_message in caplog.text

+ 103 - 0
tests/core/data/test_aws_s3_data_node.py

@@ -0,0 +1,103 @@
+# Copyright 2023 Avaiga Private Limited
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations under the License.
+
+import os
+from dataclasses import dataclass
+from unittest.mock import patch
+
+import boto3
+from moto import mock_s3
+import pytest
+
+from taipy.core.data.data_node_id import DataNodeId
+from taipy.core.data.aws_s3 import S3ObjectDataNode
+from taipy.core.exceptions.exceptions import InvalidCustomDocument, MissingRequiredProperty
+from taipy.config.common.scope import Scope
+
+
+class TestS3ObjectDataNode:
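+    # Dummy credentials and bucket/object names: every test below runs under moto's
+    # @mock_s3 decorator, so these boto3 calls never reach a real AWS account.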
+    __properties = [
+        {
+            "aws_access_key": "testing",
+            "aws_secret_access_key": "testing",
+            "aws_s3_bucket_name": "taipy",
+            "aws_s3_object_key": " taipy-object",
+            "aws_region": "us-east-1",
+            "aws_s3_object_parameters": {},
+        }
+    ]
+
+    @mock_s3
+    @pytest.mark.parametrize("properties", __properties)
+    def test_create(self, properties):
+        aws_s3_object_dn = S3ObjectDataNode(
+            "foo_bar_aws_s3",
+            Scope.SCENARIO,
+            properties=properties,
+        )
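+        # A freshly constructed data node has no owner or job history yet and is ready for reading.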
+        assert isinstance(aws_s3_object_dn, S3ObjectDataNode)
+        assert aws_s3_object_dn.storage_type() == "s3_object"
+        assert aws_s3_object_dn.config_id == "foo_bar_aws_s3"
+        assert aws_s3_object_dn.scope == Scope.SCENARIO
+        assert aws_s3_object_dn.id is not None
+        assert aws_s3_object_dn.owner_id is None
+        assert aws_s3_object_dn.job_ids == []
+        assert aws_s3_object_dn.is_ready_for_reading
+
+    @mock_s3
+    @pytest.mark.parametrize(
+        "data",
+        [
+            ("Hello, write world!"),
+        ],
+    )
+    @pytest.mark.parametrize("properties", __properties)
+    def test_write(self, properties, data):
+        bucket_name = properties["aws_s3_bucket_name"]
+        # Create an S3 client
+        s3_client = boto3.client("s3")
+        # Create a bucket
+        s3_client.create_bucket(Bucket=bucket_name)
+        # Assign a name to the object
+        object_key = properties["aws_s3_object_key"]
+        # Create Taipy S3ObjectDataNode
+        aws_s3_object_dn = S3ObjectDataNode("foo_aws_s3", Scope.SCENARIO, properties=properties)
+        # Put an object in the bucket with Taipy
+        aws_s3_object_dn._write(data)
+        # Read the object with boto3
+        response = s3_client.get_object(Bucket=bucket_name, Key=object_key)
+
+        assert response["Body"].read().decode("utf-8") == "Hello, write world!"
+
+    @mock_s3
+    @pytest.mark.parametrize(
+        "data",
+        [
+            ("Hello, read world!"),
+        ],
+    )
+    @pytest.mark.parametrize("properties", __properties)
+    def test_read(self, properties, data):
+        bucket_name = properties["aws_s3_bucket_name"]
+        # Create an S3 client
+        client = boto3.client("s3")
+        # Create a bucket
+        client.create_bucket(Bucket=bucket_name)
+        # Put an object in the bucket with boto3
+        object_key = properties["aws_s3_object_key"]
+        object_body = "Hello, read world!"
+        client.put_object(Body=object_body, Bucket=bucket_name, Key=object_key)
+        # Create Taipy S3ObjectDataNode
+        aws_s3_object_dn = S3ObjectDataNode("foo_aws_s3", Scope.SCENARIO, properties=properties)
+        # Read the Object from bucket with Taipy
+        response = aws_s3_object_dn._read()
+
+        assert response == data

+ 1 - 1
tests/core/test_core.py

@@ -31,7 +31,7 @@ class TestCore:
                 core.run()
         expected_error_message = (
             "`storage_type` field of DataNodeConfig `d0` must be either csv, sql_table,"
-            " sql, mongo_collection, pickle, excel, generic, json, parquet, or in_memory."
+            " sql, mongo_collection, pickle, excel, generic, json, parquet, s3_object, or in_memory."
             ' Current value of property `storage_type` is "toto".'
         )
         assert expected_error_message in caplog.text

+ 64 - 86
tests/core/test_taipy.py

@@ -16,6 +16,7 @@ import shutil
 from unittest import mock

 import pytest
+
 import taipy.core.taipy as tp
 from taipy.config.common.frequency import Frequency
 from taipy.config.common.scope import Scope
@@ -46,7 +47,6 @@ from taipy.core.exceptions.exceptions import DataNodeConfigIsNotGlobal, InvalidE
 from taipy.core.job._job_manager import _JobManager
 from taipy.core.job.job import Job
 from taipy.core.scenario._scenario_manager import _ScenarioManager
-from taipy.core.sequence._sequence_manager import _SequenceManager
 from taipy.core.task._task_manager import _TaskManager


@@ -84,7 +84,7 @@ class TestTaipy:
             mck.assert_called_once_with(scenario_id)

         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._is_editable") as mck:
-            scenario = Scenario("scenario_config_id", [], {})
+            scenario = Scenario("scenario_config_id", set(), {})
             tp.is_editable(scenario)
             mck.assert_called_once_with(scenario)

@@ -94,7 +94,7 @@ class TestTaipy:
             mck.assert_called_once_with(sequence_id)

         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._is_editable") as mck:
-            sequence = Sequence({}, [], "sequence_id")
+            sequence = Sequence({}, [], SequenceId("sequence_id"))
             tp.is_editable(sequence)
             mck.assert_called_once_with(sequence)

@@ -127,15 +127,12 @@ class TestTaipy:
             mck.assert_called_once_with(data_node)

     def test_is_editable(self):
-        current_date = datetime.datetime.now()
-
-        cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date)
-        scenario = Scenario("scenario_config_id", [], {}, sequences={"sequence": {}})
-
+        a_date = datetime.datetime.now()
+        cycle = Cycle(Frequency.DAILY, {}, a_date, a_date, a_date)
+        scenario = Scenario("scenario_config_id", set(), {}, sequences={"sequence": {}})
         task = Task("task_config_id", {}, print)
-        job = Job("job_id", task, "submit_id", scenario.id)
-        dn = PickleDataNode("data_node_config_id", Scope.SCENARIO)
-
+        job = Job(JobId("job_id"), task, "submit_id", scenario.id)
+        dn = PickleDataNode(config_id="data_node_config_id", scope=Scope.SCENARIO)
         _CycleManager._set(cycle)
         _ScenarioManager._set(scenario)
         _TaskManager._set(task)
@@ -166,7 +163,7 @@ class TestTaipy:
             mck.assert_called_once_with(scenario_id)

         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._is_readable") as mck:
-            scenario = Scenario("scenario_config_id", [], {})
+            scenario = Scenario("scenario_config_id", set(), {})
             tp.is_readable(scenario)
             mck.assert_called_once_with(scenario)

@@ -176,7 +173,7 @@ class TestTaipy:
             mck.assert_called_once_with(sequence_id)

         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._is_readable") as mck:
-            sequence = Sequence({}, [], "sequence_id")
+            sequence = Sequence({}, [], SequenceId("sequence_id"))
             tp.is_readable(sequence)
             mck.assert_called_once_with(sequence)

@@ -209,15 +206,12 @@ class TestTaipy:
             mck.assert_called_once_with(data_node)

     def test_is_readable(self):
-        current_date = datetime.datetime.now()
-
-        cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date)
-        scenario = Scenario("scenario_config_id", [], {}, sequences={"sequence": {}})
-
+        a_date = datetime.datetime.now()
+        cycle = Cycle(Frequency.DAILY, {}, a_date, a_date, a_date)
+        scenario = Scenario("scenario_config_id", set(), {}, sequences={"sequence": {}})
         task = Task("task_config_id", {}, print)
-        job = Job("job_id", task, "submit_id", scenario.id)
-        dn = PickleDataNode("data_node_config_id", Scope.SCENARIO)
-
+        job = Job(JobId("a_job_id"), task, "submit_id", scenario.id)
+        dn = PickleDataNode(config_id="a_data_node_config_id", scope=Scope.SCENARIO)
         _CycleManager._set(cycle)
         _ScenarioManager._set(scenario)
         _TaskManager._set(task)
@@ -239,7 +233,7 @@ class TestTaipy:
             mck.assert_called_once_with(scenario_id)

         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._is_submittable") as mck:
-            scenario = Scenario("scenario_config_id", [], {})
+            scenario = Scenario("scenario_config_id", set(), {})
             tp.is_submittable(scenario)
             mck.assert_called_once_with(scenario)

@@ -249,7 +243,7 @@ class TestTaipy:
             mck.assert_called_once_with(sequence_id)

         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._is_submittable") as mck:
-            sequence = Sequence({}, [], "sequence_id")
+            sequence = Sequence({}, [], SequenceId("sequence_id"))
             tp.is_submittable(sequence)
             mck.assert_called_once_with(sequence)

@@ -265,12 +259,10 @@ class TestTaipy:

     def test_is_submittable(self):
         current_date = datetime.datetime.now()
-
         cycle = Cycle(Frequency.DAILY, {}, current_date, current_date, current_date)
-        scenario = Scenario("scenario_config_id", [], {}, sequences={"sequence": {}})
-
+        scenario = Scenario("scenario_config_id", set(), {}, sequences={"sequence": {}})
         task = Task("task_config_id", {}, print)
-        job = Job("job_id", task, "submit_id", scenario.id)
+        job = Job(JobId("job_id"), task, "submit_id", ScenarioId(scenario.id))
         dn = PickleDataNode("data_node_config_id", Scope.SCENARIO)

         _CycleManager._set(cycle)
@@ -350,7 +342,7 @@ class TestTaipy:
             mck.assert_called_once_with(scenario_id)

         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._is_deletable") as mck:
-            scenario = Scenario("config_id", [], {})
+            scenario = Scenario("config_id", set(), {})
             tp.is_deletable(scenario)
             mck.assert_called_once_with(scenario)

@@ -360,7 +352,7 @@ class TestTaipy:
             mck.assert_called_once_with(job_id)

         with mock.patch("taipy.core.job._job_manager._JobManager._is_deletable") as mck:
-            job = Job("job_id", task, "submit_id", task.id)
+            job = Job(JobId("job_id"), task, "submit_id", task.id)
             tp.is_deletable(job)
             mck.assert_called_once_with(job)

@@ -371,7 +363,7 @@ class TestTaipy:
             mck.assert_called_once_with(scenario_id)

         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._is_promotable_to_primary") as mck:
-            scenario = Scenario("config_id", [], {})
+            scenario = Scenario("config_id", set(), {})
             tp.is_promotable(scenario)
             mck.assert_called_once_with(scenario)

@@ -439,36 +431,48 @@ class TestTaipy:
             mck.assert_called_once_with(scenario, scenario, data_node_config_id="dn")
             mck.assert_called_once_with(scenario, scenario, data_node_config_id="dn")
 
 
     def test_subscribe_scenario(self, scenario):
     def test_subscribe_scenario(self, scenario):
+        def cb(s, j):
+            print()
+
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._subscribe") as mck:
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._subscribe") as mck:
-            tp.subscribe_scenario(print)
-            mck.assert_called_once_with(print, [], None)
+            tp.subscribe_scenario(cb)
+            mck.assert_called_once_with(cb, [], None)
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._subscribe") as mck:
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._subscribe") as mck:
-            tp.subscribe_scenario(print, scenario=scenario)
-            mck.assert_called_once_with(print, [], scenario)
+            tp.subscribe_scenario(cb, scenario=scenario)
+            mck.assert_called_once_with(cb, [], scenario)
 
 
     def test_unsubscribe_scenario(self, scenario):
     def test_unsubscribe_scenario(self, scenario):
+        def cb(s, j):
+            print()
+
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe") as mck:
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe") as mck:
-            tp.unsubscribe_scenario(print)
-            mck.assert_called_once_with(print, None, None)
+            tp.unsubscribe_scenario(cb)
+            mck.assert_called_once_with(cb, None, None)
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe") as mck:
         with mock.patch("taipy.core.scenario._scenario_manager._ScenarioManager._unsubscribe") as mck:
-            tp.unsubscribe_scenario(print, scenario=scenario)
-            mck.assert_called_once_with(print, None, scenario)
+            tp.unsubscribe_scenario(cb, scenario=scenario)
+            mck.assert_called_once_with(cb, None, scenario)
 
 
     def test_subscribe_sequence(self, sequence):
     def test_subscribe_sequence(self, sequence):
+        def cb(s, j):
+            print()
+
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._subscribe") as mck:
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._subscribe") as mck:
-            tp.subscribe_sequence(print)
-            mck.assert_called_once_with(print, None, None)
+            tp.subscribe_sequence(cb)
+            mck.assert_called_once_with(cb, None, None)
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._subscribe") as mck:
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._subscribe") as mck:
-            tp.subscribe_sequence(print, sequence=sequence)
-            mck.assert_called_once_with(print, None, sequence)
+            tp.subscribe_sequence(cb, sequence=sequence)
+            mck.assert_called_once_with(cb, None, sequence)
 
 
     def test_unsubscribe_sequence(self, sequence):
     def test_unsubscribe_sequence(self, sequence):
+        def cb(s, j):
+            print()
+
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe") as mck:
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe") as mck:
-            tp.unsubscribe_sequence(callback=print)
-            mck.assert_called_once_with(print, None, None)
+            tp.unsubscribe_sequence(callback=cb)
+            mck.assert_called_once_with(cb, None, None)
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe") as mck:
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._unsubscribe") as mck:
-            tp.unsubscribe_sequence(callback=print, sequence=sequence)
-            mck.assert_called_once_with(print, None, sequence)
+            tp.unsubscribe_sequence(callback=cb, sequence=sequence)
+            mck.assert_called_once_with(cb, None, sequence)
 
 
     def test_delete_sequence(self):
     def test_delete_sequence(self):
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._hard_delete") as mck:
         with mock.patch("taipy.core.sequence._sequence_manager._SequenceManager._hard_delete") as mck:
@@ -544,40 +548,19 @@ class TestTaipy:
             tp.cancel_job("job_id")
             tp.cancel_job("job_id")
             mck.assert_called_once_with("job_id")
             mck.assert_called_once_with("job_id")
 
 
-    def test_block_config_when_core_is_running_in_development_mode(self):
-        input_cfg_1 = Config.configure_data_node(id="i1", storage_type="pickle", default_data=1, scope=Scope.SCENARIO)
-        output_cfg_1 = Config.configure_data_node(id="o1", storage_type="pickle", scope=Scope.SCENARIO)
-        task_cfg_1 = Config.configure_task("t1", print, input_cfg_1, output_cfg_1)
-        scenario_cfg_1 = Config.configure_scenario("s1", [task_cfg_1], [], Frequency.DAILY)
-
-        with mock.patch("sys.argv", ["prog"]):
-            core = Core()
-            core.run()
-
-        scenario_1 = tp.create_scenario(scenario_cfg_1)
-        tp.submit(scenario_1)
-
-        with pytest.raises(ConfigurationUpdateBlocked):
-            Config.configure_scenario("block_scenario", set([task_cfg_1]))
-        core.stop()
-
-    def test_block_config_when_core_is_running_in_standalone_mode(self):
+    def test_block_config_when_core_is_running(self):
         Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE)
-
-        input_cfg_1 = Config.configure_data_node(id="i1", storage_type="pickle", default_data=1, scope=Scope.SCENARIO)
+        input_cfg_1 = Config.configure_data_node(id="i1", storage_type="pickle", scope=Scope.SCENARIO, default_data=1)
         output_cfg_1 = Config.configure_data_node(id="o1", storage_type="pickle", scope=Scope.SCENARIO)
         task_cfg_1 = Config.configure_task("t1", print, input_cfg_1, output_cfg_1)
-        scenario_cfg_1 = Config.configure_scenario("s1", [task_cfg_1], [], Frequency.DAILY)
+        Config.configure_scenario("s1", [task_cfg_1], [], Frequency.DAILY)

         with mock.patch("sys.argv", ["prog"]):
             core = Core()
             core.run()

-        scenario_1 = tp.create_scenario(scenario_cfg_1)
-        tp.submit(scenario_1, wait=True)
-
         with pytest.raises(ConfigurationUpdateBlocked):
-            Config.configure_scenario("block_scenario", set([task_cfg_1]))
+            Config.configure_scenario("block_scenario", [task_cfg_1])
         core.stop()

     def test_get_data_node(self, data_node):
@@ -640,12 +623,12 @@ class TestTaipy:
     def test_export_scenario_filesystem(self):
         shutil.rmtree("./tmp", ignore_errors=True)

-        input_cfg_1 = Config.configure_data_node(id="i1", storage_type="pickle", default_data=1, scope=Scope.SCENARIO)
+        input_cfg_1 = Config.configure_data_node(id="i1", storage_type="pickle", scope=Scope.SCENARIO, default_data=1)
         output_cfg_1 = Config.configure_data_node(id="o1", storage_type="pickle", scope=Scope.SCENARIO)
         task_cfg_1 = Config.configure_task("t1", print, input_cfg_1, output_cfg_1)
         scenario_cfg_1 = Config.configure_scenario("s1", [task_cfg_1], [], Frequency.DAILY)

-        input_cfg_2 = Config.configure_data_node(id="i2", storage_type="pickle", default_data=2, scope=Scope.SCENARIO)
+        input_cfg_2 = Config.configure_data_node(id="i2", storage_type="pickle", scope=Scope.SCENARIO, default_data=2)
         output_cfg_2 = Config.configure_data_node(id="o2", storage_type="pickle", scope=Scope.SCENARIO)
         task_cfg_2 = Config.configure_task("t2", print, input_cfg_2, output_cfg_2)
         scenario_cfg_2 = Config.configure_scenario("s2", [task_cfg_2], [], Frequency.DAILY)
@@ -700,9 +683,9 @@ class TestTaipy:
         dn_config_2 = Config.configure_data_node(id="d2", storage_type="in_memory", scope=Scope.SCENARIO)
         dn_config_3 = Config.configure_data_node(id="d3", storage_type="in_memory", scope=Scope.SCENARIO)
         dn_config_4 = Config.configure_data_node(id="d4", storage_type="in_memory", scope=Scope.SCENARIO)
-        task_config_1 = Config.configure_task("t1", print, dn_config_1, dn_config_2)
-        task_config_2 = Config.configure_task("t2", print, dn_config_2, dn_config_3)
-        scenario_cfg_1 = Config.configure_scenario("s1", [task_config_1, task_config_2], [dn_config_4], Frequency.DAILY)
+        t_config_1 = Config.configure_task("t1", print, dn_config_1, dn_config_2)
+        t_config_2 = Config.configure_task("t2", print, dn_config_2, dn_config_3)
+        scenario_cfg_1 = Config.configure_scenario("s1", [t_config_1, t_config_2], [dn_config_4], Frequency.DAILY)

         scenario = tp.create_scenario(scenario_cfg_1)
         tasks = scenario.tasks
@@ -746,16 +729,11 @@ class TestTaipy:
         assert_result_parents_and_expected_parents(parents, expected_parents)

     def test_get_cycles_scenarios(self):
-        scenario_cfg_1 = Config.configure_scenario(
-            "s1",
-            set(),
-            set(),
-            Frequency.DAILY,
-        )
-        scenario_cfg_2 = Config.configure_scenario("s2", set(), set(), Frequency.WEEKLY)
-        scenario_cfg_3 = Config.configure_scenario("s3", set(), set(), Frequency.MONTHLY)
-        scenario_cfg_4 = Config.configure_scenario("s4", set(), set(), Frequency.YEARLY)
-        scenario_cfg_5 = Config.configure_scenario("s5", set(), set(), None)
+        scenario_cfg_1 = Config.configure_scenario("s1", [], [], Frequency.DAILY)
+        scenario_cfg_2 = Config.configure_scenario("s2", [], [], Frequency.WEEKLY)
+        scenario_cfg_3 = Config.configure_scenario("s3", [], [], Frequency.MONTHLY)
+        scenario_cfg_4 = Config.configure_scenario("s4", [], [], Frequency.YEARLY)
+        scenario_cfg_5 = Config.configure_scenario("s5", [], [], None)

         now = datetime.datetime.now()
         scenario_1_1 = tp.create_scenario(scenario_cfg_1, now)

+ 23 - 8
tests/gui_core/test_context_is_readable.py

@@ -13,17 +13,21 @@ from unittest.mock import Mock, patch

 import pytest

+import typing as t
+
 from taipy.config.common.scope import Scope
-from taipy.core import Job, Scenario, Task
+from taipy.core import Job, JobId, Scenario, Task
 from taipy.core.data.pickle import PickleDataNode
+from taipy.core.submission.submission import Submission
 from taipy.gui import Gui
-from taipy.gui_core._context import _GuiCoreContext
+from taipy.gui_core._context import _GuiCoreContext, _SubmissionDetails

 a_scenario = Scenario("scenario_config_id", [], {}, sequences={"sequence": {}})
 a_task = Task("task_config_id", {}, print)
-a_job = Job("JOB_job_id", a_task, "submit_id", a_scenario.id)
+a_job = Job(t.cast(JobId, "JOB_job_id"), a_task, "submit_id", a_scenario.id)
 a_job.isfinished = lambda s: True
 a_datanode = PickleDataNode("data_node_config_id", Scope.SCENARIO)
+a_submission = Submission(a_scenario.id, "Scenario", a_scenario.config_id)


 def mock_is_readable_false(entity_id):
@@ -41,6 +45,8 @@ def mock_core_get(entity_id):
         return a_job
     if entity_id == a_datanode.id:
         return a_datanode
+    if entity_id == a_submission.id:
+        return a_submission
     return a_task


@@ -138,21 +144,30 @@ class TestGuiCoreContext_is_readable:
                 assert str(assign.call_args.args[1]).endswith("is not readable.")

     def test_scenario_status_callback(self):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get) as mockget:
+        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get) as mockget, patch(
+            "taipy.gui_core._context.core_get_submission", side_effect=mock_core_get
+        ):
             mockget.reset_mock()
             gui_core_context = _GuiCoreContext(Mock())
-            gui_core_context.scenario_status_callback(a_job.id)
+
+            def sub_cb():
+                return True
+
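+            # Register the submission in client_submission so that scenario_status_callback
+            # can resolve it when invoked with the submission id below.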
+            gui_core_context.client_submission[a_submission.id] = _SubmissionDetails(
+                "client_id", "", sub_cb, a_submission
+            )
+            gui_core_context.scenario_status_callback(a_submission.id)
             mockget.assert_called()
             found = False
             for call in mockget.call_args_list:
-                if call.args[0] == a_job.id:
+                if call.args[0] == a_scenario.id:
                     found = True
                     break
             assert found is True
             mockget.reset_mock()

-            with patch("taipy.gui_core._context.is_readable", side_effect=mock_is_readable_false):
-                gui_core_context.scenario_status_callback(a_job.id)
+            with patch("taipy.gui_core._context.is_readable_submission", side_effect=mock_is_readable_false):
+                gui_core_context.scenario_status_callback(a_submission.id)
                 mockget.assert_not_called()

     def test_data_node_adapter(self):

+ 0 - 233
tests/gui_core/test_context_submission_status.py

@@ -1,233 +0,0 @@
-# Copyright 2023 Avaiga Private Limited
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-#        http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
-# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations under the License.
-
-from unittest.mock import Mock, patch
-
-import pytest
-
-from taipy.core import Status
-from taipy.gui_core._context import _GuiCoreContext, _SubmissionStatus
-
-
-class MockJob:
-    def __init__(self, id: str, status):
-        self.status = status
-        self.id = id
-
-    def is_failed(self):
-        return self.status == Status.FAILED
-
-    def is_canceled(self):
-        return self.status == Status.CANCELED
-
-    def is_blocked(self):
-        return self.status == Status.BLOCKED
-
-    def is_pending(self):
-        return self.status == Status.PENDING
-
-    def is_running(self):
-        return self.status == Status.RUNNING
-
-    def is_completed(self):
-        return self.status == Status.COMPLETED
-
-    def is_skipped(self):
-        return self.status == Status.SKIPPED
-
-    def is_abandoned(self):
-        return self.status == Status.ABANDONED
-
-    def is_submitted(self):
-        return self.status == Status.SUBMITTED
-
-
-def mock_core_get(entity_id):
-    jobs = {
-        "job0_submitted": MockJob("job0_submitted", Status.SUBMITTED),
-        "job1_failed": MockJob("job1_failed", Status.FAILED),
-        "job2_canceled": MockJob("job2_canceled", Status.CANCELED),
-        "job3_blocked": MockJob("job3_blocked", Status.BLOCKED),
-        "job4_pending": MockJob("job4_pending", Status.PENDING),
-        "job5_running": MockJob("job5_running", Status.RUNNING),
-        "job6_completed": MockJob("job6_completed", Status.COMPLETED),
-        "job7_skipped": MockJob("job7_skipped", Status.SKIPPED),
-        "job8_abandoned": MockJob("job8_abandoned", Status.ABANDONED),
-    }
-    return jobs[entity_id]
-
-
-class TestGuiCoreContext_SubmissionStatus:
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job1_failed"], _SubmissionStatus.FAILED),
-            (["job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job3_blocked"], _SubmissionStatus.BLOCKED),
-            (["job4_pending"], _SubmissionStatus.WAITING),
-            (["job5_running"], _SubmissionStatus.RUNNING),
-            (["job6_completed"], _SubmissionStatus.COMPLETED),
-            (["job7_skipped"], _SubmissionStatus.COMPLETED),
-            (["job8_abandoned"], _SubmissionStatus.UNDEFINED),
-        ],
-    )
-    def test_single_job(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job1_failed", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job2_canceled"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job3_blocked"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job4_pending"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job5_running"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job6_completed"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job7_skipped"], _SubmissionStatus.FAILED),
-            (["job1_failed", "job8_abandoned"], _SubmissionStatus.FAILED),
-            (["job2_canceled", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job3_blocked", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job4_pending", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job5_running", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job6_completed", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job7_skipped", "job1_failed"], _SubmissionStatus.FAILED),
-            (["job8_abandoned", "job1_failed"], _SubmissionStatus.FAILED),
-        ],
-    )
-    def test_one_failed_job(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job2_canceled", "job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job2_canceled", "job3_blocked"], _SubmissionStatus.CANCELED),
-            (["job2_canceled", "job4_pending"], _SubmissionStatus.CANCELED),
-            (["job2_canceled", "job5_running"], _SubmissionStatus.CANCELED),
-            (["job2_canceled", "job6_completed"], _SubmissionStatus.CANCELED),
-            (["job2_canceled", "job7_skipped"], _SubmissionStatus.CANCELED),
-            (["job2_canceled", "job8_abandoned"], _SubmissionStatus.CANCELED),
-            (["job3_blocked", "job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job4_pending", "job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job5_running", "job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job6_completed", "job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job7_skipped", "job2_canceled"], _SubmissionStatus.CANCELED),
-            (["job8_abandoned", "job2_canceled"], _SubmissionStatus.CANCELED),
-        ],
-    )
-    def test_no_failed_one_cancel(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job4_pending", "job3_blocked"], _SubmissionStatus.WAITING),
-            (["job4_pending", "job4_pending"], _SubmissionStatus.WAITING),
-            (["job4_pending", "job6_completed"], _SubmissionStatus.WAITING),
-            (["job4_pending", "job7_skipped"], _SubmissionStatus.WAITING),
-            (["job3_blocked", "job4_pending"], _SubmissionStatus.WAITING),
-            (["job6_completed", "job4_pending"], _SubmissionStatus.WAITING),
-            (["job7_skipped", "job4_pending"], _SubmissionStatus.WAITING),
-        ],
-    )
-    def test_no_failed_or_cancel_one_pending(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job5_running", "job3_blocked"], _SubmissionStatus.RUNNING),
-            (["job5_running", "job4_pending"], _SubmissionStatus.RUNNING),
-            (["job5_running", "job5_running"], _SubmissionStatus.RUNNING),
-            (["job5_running", "job6_completed"], _SubmissionStatus.RUNNING),
-            (["job5_running", "job7_skipped"], _SubmissionStatus.RUNNING),
-            (["job3_blocked", "job5_running"], _SubmissionStatus.RUNNING),
-            (["job4_pending", "job5_running"], _SubmissionStatus.RUNNING),
-            (["job6_completed", "job5_running"], _SubmissionStatus.RUNNING),
-            (["job7_skipped", "job5_running"], _SubmissionStatus.RUNNING),
-        ],
-    )
-    def test_no_failed_cancel_nor_pending_one_running(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job3_blocked", "job3_blocked"], _SubmissionStatus.BLOCKED),
-            (["job3_blocked", "job6_completed"], _SubmissionStatus.BLOCKED),
-            (["job3_blocked", "job7_skipped"], _SubmissionStatus.BLOCKED),
-            (["job6_completed", "job3_blocked"], _SubmissionStatus.BLOCKED),
-            (["job7_skipped", "job3_blocked"], _SubmissionStatus.BLOCKED),
-        ],
-    )
-    def test_no_failed_cancel_pending_nor_running_one_blocked(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job6_completed", "job6_completed"], _SubmissionStatus.COMPLETED),
-            (["job6_completed", "job7_skipped"], _SubmissionStatus.COMPLETED),
-            (["job7_skipped", "job6_completed"], _SubmissionStatus.COMPLETED),
-            (["job7_skipped", "job7_skipped"], _SubmissionStatus.COMPLETED),
-        ],
-    )
-    def test_only_completed_or_skipped(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    @pytest.mark.parametrize(
-        "job_ids, expected_status",
-        [
-            (["job3_blocked", "job8_abandoned"], _SubmissionStatus.UNDEFINED),
-            (["job4_pending", "job8_abandoned"], _SubmissionStatus.UNDEFINED),
-            (["job5_running", "job8_abandoned"], _SubmissionStatus.UNDEFINED),
-            (["job6_completed", "job8_abandoned"], _SubmissionStatus.UNDEFINED),
-            (["job7_skipped", "job8_abandoned"], _SubmissionStatus.UNDEFINED),
-            (["job8_abandoned", "job8_abandoned"], _SubmissionStatus.UNDEFINED),
-            (["job8_abandoned", "job3_blocked"], _SubmissionStatus.UNDEFINED),
-            (["job8_abandoned", "job4_pending"], _SubmissionStatus.UNDEFINED),
-            (["job8_abandoned", "job5_running"], _SubmissionStatus.UNDEFINED),
-            (["job8_abandoned", "job6_completed"], _SubmissionStatus.UNDEFINED),
-            (["job8_abandoned", "job7_skipped"], _SubmissionStatus.UNDEFINED),
-        ],
-    )
-    def test_WRONG_CASE_abandoned_without_cancel_or_failed(self, job_ids, expected_status):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status(job_ids)
-            assert status == expected_status
-
-    def test_no_job(self):
-        with patch("taipy.gui_core._context.core_get", side_effect=mock_core_get):
-            gui_core_context = _GuiCoreContext(Mock())
-            status = gui_core_context._get_submittable_status([])
-            assert status == _SubmissionStatus.UNDEFINED

+ 1 - 0
tools/packages/taipy-core/setup.requirements.txt

@@ -5,4 +5,5 @@ modin[dask]>=0.23.0,<1.0
 pymongo[srv]>=4.2.0,<5.0
 sqlalchemy>=2.0.16,<2.1
 toml>=0.10,<0.11
+boto3>=1.29.4,<1.33.12
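+# boto3 is the AWS SDK used by the new s3_object data node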
 taipy-config