
Refactor terminology: Updated 'Parameters' and 'Args' to 'Arguments' for consistency across the codebase. (#2046)

* Refactor terminology: Updated 'Parameters' and 'Args' to 'Arguments' for consistency across the codebase.

---------

Co-authored-by: Jean-Robin <jeanrobin.medori@avaiga.com>
Manish Gupta 7 months ago
Parent
Commit
1aa13d9281
41 files changed, 175 insertions(+), 175 deletions(-)
  1. taipy/_run.py (+1 -1)
  2. taipy/common/config/_config.py (+1 -1)
  3. taipy/common/config/_config_comparator/_config_comparator.py (+2 -2)
  4. taipy/common/config/config.py (+6 -6)
  5. taipy/common/config/config.pyi (+28 -28)
  6. taipy/core/_orchestrator/_dispatcher/_development_job_dispatcher.py (+1 -1)
  7. taipy/core/_orchestrator/_dispatcher/_job_dispatcher.py (+3 -3)
  8. taipy/core/_orchestrator/_dispatcher/_standalone_job_dispatcher.py (+1 -1)
  9. taipy/core/_orchestrator/_orchestrator.py (+3 -3)
  10. taipy/core/_repository/_abstract_repository.py (+9 -9)
  11. taipy/core/common/_mongo_connector.py (+1 -1)
  12. taipy/core/common/_utils.py (+1 -1)
  13. taipy/core/config/core_section.py (+1 -1)
  14. taipy/core/config/data_node_config.py (+14 -14)
  15. taipy/core/config/job_config.py (+1 -1)
  16. taipy/core/config/scenario_config.py (+5 -5)
  17. taipy/core/config/task_config.py (+2 -2)
  18. taipy/core/data/_file_datanode_mixin.py (+1 -1)
  19. taipy/core/data/csv.py (+1 -1)
  20. taipy/core/data/data_node.py (+6 -6)
  21. taipy/core/data/excel.py (+1 -1)
  22. taipy/core/data/mongo.py (+4 -4)
  23. taipy/core/data/parquet.py (+2 -2)
  24. taipy/core/job/job.py (+1 -1)
  25. taipy/core/notification/core_event_consumer.py (+1 -1)
  26. taipy/core/notification/event.py (+1 -1)
  27. taipy/core/notification/notifier.py (+4 -4)
  28. taipy/core/orchestrator.py (+1 -1)
  29. taipy/core/scenario/_scenario_manager.py (+1 -1)
  30. taipy/core/scenario/scenario.py (+13 -13)
  31. taipy/core/sequence/sequence.py (+4 -4)
  32. taipy/core/taipy.py (+27 -27)
  33. taipy/core/task/task.py (+2 -2)
  34. taipy/gui/_gui_section.py (+1 -1)
  35. taipy/rest/api/resources/cycle.py (+2 -2)
  36. taipy/rest/api/resources/datanode.py (+5 -5)
  37. taipy/rest/api/resources/job.py (+4 -4)
  38. taipy/rest/api/resources/scenario.py (+4 -4)
  39. taipy/rest/api/resources/sequence.py (+4 -4)
  40. taipy/rest/api/resources/task.py (+4 -4)
  41. taipy/rest/rest.py (+1 -1)

+ 1 - 1
taipy/_run.py

@@ -30,7 +30,7 @@ def _run(*services: _AppType, **kwargs) -> t.Optional[Flask]:
 
     A Taipy service is an instance of a class that runs code as a Web application.
 
-    Parameters:
+    Arguments:
         *services (Union[`Gui^`, `Rest^`, `Orchestrator^`]): Services to run, as separate arguments.<br/>
             If several services are provided, all the services run simultaneously.<br/>
             If this is empty or set to None, this method does nothing.
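
For orientation, a minimal sketch of how this entry point is reached through the public `taipy.run()` wrapper (the page content is an assumption for illustration):

```python
import taipy as tp
from taipy import Gui, Orchestrator

if __name__ == "__main__":
    # Run the GUI and the Orchestrator services simultaneously.
    # Calling tp.run() with no services would do nothing, as documented above.
    tp.run(Gui(page="# Hello Taipy"), Orchestrator())
```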

+ 1 - 1
taipy/common/config/_config.py

@@ -77,7 +77,7 @@ class _Config:
         """Loop through attributes of a Section to find if any attribute has a list of Section as value.
         If there is, update each nested Section by the corresponding instance in self.
 
-        Args:
+        Arguments:
             section (Section): The Section to search for nested sections.
         """
         for _, attr_value in vars(section).items():

+ 2 - 2
taipy/common/config/_config_comparator/_config_comparator.py

@@ -41,7 +41,7 @@ class _ConfigComparator:
     ):
         """Compare between 2 _Config object to check for compatibility.
 
-        Args:
+        Arguments:
             old_config (_Config): The old _Config.
             new_config (_Config): The new _Config.
             old_version_number (str, optional): The old version number for logging. Defaults to None.
@@ -79,7 +79,7 @@ class _ConfigComparator:
     ):
         """Compare between 2 _Config object to check for compatibility.
 
-        Args:
+        Arguments:
             config_1 (_Config): The old _Config.
             config_2 (_Config): The new _Config.
             version_number_1 (str): The old version number for logging.

+ 6 - 6
taipy/common/config/config.py

@@ -139,7 +139,7 @@ class Config:
 
         The current Python configuration is replaced and the Config compilation is triggered.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the toml configuration file to load.
         """
         cls.__logger.info(f"Loading configuration. Filename: '{filename}'")
@@ -157,7 +157,7 @@ class Config:
         Note:
             If *filename* already exists, it is overwritten.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the file to export.
         """
         cls._serializer._write(cls._python_config, filename)
@@ -175,7 +175,7 @@ class Config:
         Note:
             If *filename* already exists, it is overwritten.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the file to export.
         """
         cls._serializer._write(cls._applied_config, filename)
@@ -185,7 +185,7 @@ class Config:
     def restore(cls, filename: str) -> None:
         """Restore a configuration file and replace the current applied configuration.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the toml configuration file to load.
         """
         cls.__logger.info(f"Restoring configuration. Filename: '{filename}'")
@@ -197,7 +197,7 @@ class Config:
     def override(cls, filename: str) -> None:
         """Load a configuration from a file and overrides the current config.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the toml configuration file to load.
         """
         cls.__logger.info(f"Loading configuration. Filename: '{filename}'")
@@ -221,7 +221,7 @@ class Config:
     def configure_global_app(cls, **properties) -> GlobalAppConfig:
         """Configure the global application.
 
-        Parameters:
+        Arguments:
             **properties (Dict[str, Any]): A dictionary of additional properties.
 
         Returns:
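
As a quick illustration of the file-based methods renamed above, a minimal usage sketch (file names are placeholders):

```python
from taipy import Config

# Load a TOML file, replacing the current Python configuration.
Config.load("taipy-config.toml")

# Export the configuration to a file (overwritten if it already exists).
Config.export("exported_config.toml")

# Load another TOML file and override the current config with its values.
Config.override("overrides.toml")
```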

+ 28 - 28
taipy/common/config/config.pyi

@@ -121,7 +121,7 @@ class Config:
 
         The current Python configuration is replaced and the Config compilation is triggered.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the toml configuration file to load.
         """
 
@@ -135,7 +135,7 @@ class Config:
         Note:
             If *filename* already exists, it is overwritten.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the file to export.
         """
 
@@ -152,7 +152,7 @@ class Config:
         Note:
             If *filename* already exists, it is overwritten.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the file to export.
         """
 
@@ -161,7 +161,7 @@ class Config:
     def restore(cls, filename: str) -> None:
         """Restore a configuration file and replace the current applied configuration.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the toml configuration file to load.
         """
 
@@ -170,7 +170,7 @@ class Config:
     def override(cls, filename: str) -> None:
         """Load a configuration from a file and overrides the current config.
 
-        Parameters:
+        Arguments:
             filename (Union[str, Path]): The path of the toml configuration file to load.
         """
 
@@ -187,7 +187,7 @@ class Config:
     def configure_global_app(cls, **properties) -> GlobalAppConfig:
         """Configure the global application.
 
-        Parameters:
+        Arguments:
             **properties (Dict[str, Any]): A dictionary of additional properties.
 
         Returns:
@@ -266,7 +266,7 @@ class Config:
     ) -> "ScenarioConfig":
         """Configure a new scenario configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new scenario configuration.
             task_configs (Optional[List[TaskConfig^]]): The list of task configurations used by this
                 scenario configuration. The default value is None.
@@ -306,7 +306,7 @@ class Config:
         where all scenario configuration objects will find their default
         values when needed.
 
-        Parameters:
+        Arguments:
             task_configs (Optional[List[TaskConfig^]]): The list of task configurations used by this
                 scenario configuration.
             additional_data_node_configs (Optional[List[DataNodeConfig^]]): The list of additional data nodes
@@ -339,7 +339,7 @@ class Config:
         where all data node configuration objects will find their default
         values when needed.
 
-        Parameters:
+        Arguments:
             storage_type (str): The default storage type for all data node configurations.
                 The possible values are *"pickle"* (the default value), *"csv"*, *"excel"*,
                 *"sql"*, *"mongo_collection"*, *"in_memory"*, *"json"*, *"parquet"*, *"generic"*,
@@ -367,7 +367,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new data node configuration from an existing one.
 
-        Parameters:
+        Arguments:
             source_configuration (DataNodeConfig): The source data node configuration.
             id (str): The unique identifier of the new data node configuration.
             **properties (dict[str, any]): A keyworded variable length list of additional arguments.<br/>
@@ -388,7 +388,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new data node configuration.
             storage_type (Optional[str]): The data node configuration storage type. The possible values
                 are None (which is the default value of *"pickle"*, unless it has been overloaded by the
@@ -425,7 +425,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new CSV data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new CSV data node configuration.
             default_path (Optional[str]): The default path of the CSV file.
             encoding (Optional[str]): The encoding of the CSV file.
@@ -460,7 +460,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new JSON data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new JSON data node configuration.
             default_path (Optional[str]): The default path of the JSON file.
             encoding (Optional[str]): The encoding of the JSON file.
@@ -496,7 +496,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new Parquet data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new Parquet data node configuration.
             default_path (Optional[str]): The default path of the Parquet file.
             engine (Optional[str]): Parquet library to use. Possible values are *"fastparquet"* or
@@ -508,8 +508,8 @@ class Config:
                 function.
             write_kwargs (Optional[dict]): Additional parameters passed to the
                `pandas.DataFrame.to_parquet()` function.<br/>
-                The parameters in *read_kwargs* and *write_kwargs* have a **higher precedence** than the
-                top-level parameters which are also passed to Pandas.
+                The parameters in *read_kwargs* and *write_kwargs* have a **higher precedence**
+                than the top-level parameters which are also passed to Pandas.
             exposed_type (Optional[str]): The exposed type of the data read from Parquet file.<br/>
                 The default value is `pandas`.
             scope (Optional[Scope^]): The scope of the Parquet data node configuration.<br/>
@@ -540,7 +540,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new Excel data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new Excel data node configuration.
             default_path (Optional[str]): The path of the Excel file.
             has_header (Optional[bool]): If True, indicates that the Excel file has a header.
@@ -576,7 +576,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new generic data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new generic data node configuration.
             read_fct (Optional[Callable]): The Python function called to read the data.
             write_fct (Optional[Callable]): The Python function called to write the data.
@@ -610,7 +610,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new *in-memory* data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new in_memory data node configuration.
             default_data (Optional[any]): The default data of the data nodes instantiated from
                 this in_memory data node configuration.
@@ -642,7 +642,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new pickle data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new pickle data node configuration.
             default_path (Optional[str]): The path of the pickle file.
             default_data (Optional[any]): The default data of the data nodes instantiated from
@@ -685,7 +685,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new SQL table data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new SQL data node configuration.
             db_name (str): The database name, or the name of the SQLite database file.
             db_engine (str): The database engine. Possible values are *"sqlite"*, *"mssql"*, *"mysql"*,
@@ -746,7 +746,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new SQL data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new SQL data node configuration.
             db_name (str): The database name, or the name of the SQLite database file.
             db_engine (str): The database engine. Possible values are *"sqlite"*, *"mssql"*, *"mysql"*,
@@ -806,7 +806,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new Mongo collection data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new Mongo collection data node configuration.
             db_name (str): The database name.
             collection_name (str): The collection in the database to read from and to write the data to.
@@ -856,7 +856,7 @@ class Config:
     ) -> "DataNodeConfig":
         """Configure a new S3 object data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new S3 Object data node configuration.
            aws_access_key (str): Amazon Web Services (AWS) ID used to identify the account.
             aws_secret_access_key (str): Amazon Web Services (AWS) access key to authenticate
@@ -906,7 +906,7 @@ class Config:
     ) -> "TaskConfig":
         """Configure a new task configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of this task configuration.
             function (Callable): The python function called by Taipy to run the task.
             input (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the
@@ -938,7 +938,7 @@ class Config:
         where all task configuration objects will find their default
         values when needed.
 
-        Parameters:
+        Arguments:
             function (Callable): The python function called by Taipy to run the task.
             input (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the
                 input data node configurations. This can be a unique data node
@@ -962,7 +962,7 @@ class Config:
     ) -> "JobConfig":
         """Configure job execution.
 
-        Parameters:
+        Arguments:
             mode (Optional[str]): The job execution mode.
                 Possible values are: *"standalone"* or *"development"*.
             max_nb_of_workers (Optional[int, str]): Parameter used only in *"standalone"* mode.
@@ -992,7 +992,7 @@ class Config:
     ) -> "CoreSection":
         """Configure the Orchestrator service.
 
-        Parameters:
+        Arguments:
             root_folder (Optional[str]): Path of the base folder for the taipy application.
                 The default value is "./taipy/"
             storage_folder (str): Folder name used to store user data. The default value is "user_data/". It is used in
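
To tie several of the stubbed methods together, a minimal configuration sketch (the `double` function and the identifiers are placeholders):

```python
from taipy import Config

def double(nb: int) -> int:
    return nb * 2

# A pickle data node holding a default value, a task wiring input to
# output through `double`, and a scenario built from that task.
input_cfg = Config.configure_data_node("input", default_data=21)
output_cfg = Config.configure_data_node("output")
task_cfg = Config.configure_task("double", function=double,
                                 input=input_cfg, output=output_cfg)
scenario_cfg = Config.configure_scenario("my_scenario", task_configs=[task_cfg])
```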

+ 1 - 1
taipy/core/_orchestrator/_dispatcher/_development_job_dispatcher.py

@@ -41,7 +41,7 @@ class _DevelopmentJobDispatcher(_JobDispatcher):
     def _dispatch(self, job: Job):
         """Dispatches the given `Job^` on an available worker for execution.
 
-        Parameters:
+        Arguments:
             job (Job^): The job to submit on an executor with an available worker.
         """
         rs = _TaskFunctionWrapper(job.id, job.task).execute()

+ 3 - 3
taipy/core/_orchestrator/_dispatcher/_job_dispatcher.py

@@ -52,7 +52,7 @@ class _JobDispatcher(threading.Thread):
     def stop(self, wait: bool = True, timeout: Optional[float] = None):
         """Stop the dispatcher.
 
-        Parameters:
+        Arguments:
             wait (bool): If True, the method will wait for the dispatcher to stop.
             timeout (Optional[float]): The maximum time to wait. If None, the method will wait indefinitely.
         """
@@ -117,7 +117,7 @@ class _JobDispatcher(threading.Thread):
         """
         Returns True if the task has no output or if at least one input was modified since the latest run.
 
-        Parameters:
+        Arguments:
              task (Task^): The task to run.
 
         Returns:
@@ -142,7 +142,7 @@ class _JobDispatcher(threading.Thread):
         """
         Dispatches the given `Job^` on an available worker for execution.
 
-        Parameters:
+        Arguments:
             job (Job^): The job to submit on an executor with an available worker.
         """
         raise NotImplementedError

+ 1 - 1
taipy/core/_orchestrator/_dispatcher/_standalone_job_dispatcher.py

@@ -52,7 +52,7 @@ class _StandaloneJobDispatcher(_JobDispatcher):
     def _dispatch(self, job: Job):
         """Dispatches the given `Job^` on an available worker for execution.
 
-        Parameters:
+        Arguments:
             job (Job^): The job to submit on an executor with an available worker.
         """
         with self._nb_available_workers_lock:

+ 3 - 3
taipy/core/_orchestrator/_orchestrator.py

@@ -57,7 +57,7 @@ class _Orchestrator(_AbstractOrchestrator):
     ) -> Submission:
         """Submit the given `Scenario^` or `Sequence^` for an execution.
 
-        Parameters:
+        Arguments:
              submittable (Union[Scenario^, Sequence^]): The scenario or sequence to submit for execution.
         callbacks: The optional list of functions that should be executed on job status change.
         force (bool): Enforce execution of the scenario's or sequence's tasks even if their output data
@@ -114,7 +114,7 @@ class _Orchestrator(_AbstractOrchestrator):
     ) -> Submission:
         """Submit the given `Task^` for an execution.
 
-        Parameters:
+        Arguments:
              task (Task^): The task to submit for execution.
              callbacks: The optional list of functions that should be executed on job status change.
              force (bool): Enforce execution of the task even if its output data nodes are cached.
@@ -226,7 +226,7 @@ class _Orchestrator(_AbstractOrchestrator):
     def _is_blocked(cls, obj: Union[Task, Job]) -> bool:
         """Returns True if the execution of the `Job^` or the `Task^` is blocked by the execution of another `Job^`.
 
-        Parameters:
+        Arguments:
              obj (Union[Task^, Job^]): The job or task entity to run.
 
         Returns:

+ 9 - 9
taipy/core/_repository/_abstract_repository.py

@@ -27,7 +27,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
         """
         Save an entity in the repository.
 
-        Parameters:
+        Arguments:
             entity: The data from an object.
         """
         raise NotImplementedError
@@ -36,7 +36,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
     def _exists(self, entity_id: str) -> bool:
         """
         Check if an entity with id entity_id exists in the repository.
-        Parameters:
+        Arguments:
             entity_id: The entity id, i.e., its primary key.
 
         Returns:
@@ -48,7 +48,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
     def _load(self, entity_id: str) -> Entity:
         """
         Retrieve the entity data from the repository.
-        Parameters:
+        Arguments:
             entity_id: The entity id, i.e., its primary key.
 
         Returns:
@@ -71,7 +71,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
         """
         Delete an entity in the repository.
 
-        Parameters:
+        Arguments:
             entity_id: The id of the entity to be deleted.
         """
         raise NotImplementedError
@@ -88,7 +88,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
         """
         Delete all entities from the list of ids from the repository.
 
-        Parameters:
+        Arguments:
             ids: List of ids to be deleted.
         """
         raise NotImplementedError
@@ -98,7 +98,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
         """
        Delete all entities matching the given attribute value from the repository.
 
-        Parameters:
+        Arguments:
             attribute: The entity property that is the key to the search.
            value: The value of the attribute that is being searched.
         """
@@ -107,7 +107,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
     @abstractmethod
     def _search(self, attribute: str, value: Any, filters: Optional[List[Dict]] = None) -> List[Entity]:
         """
-        Parameters:
+        Arguments:
             attribute: The entity property that is the key to the search.
            value: The value of the attribute that is being searched.
 
@@ -121,7 +121,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
         """
         Export an entity from the repository.
 
-        Parameters:
+        Arguments:
             entity_id (str): The id of the entity to be exported.
             folder_path (Union[str, pathlib.Path]): The folder path to export the entity to.
         """
@@ -131,7 +131,7 @@ class _AbstractRepository(Generic[ModelType, Entity]):
         """
         Import an entity from an exported file.
 
-        Parameters:
+        Arguments:
            folder_path (Union[str, pathlib.Path]): The folder path to import the entity from.
 
         Returns:

+ 1 - 1
taipy/core/common/_mongo_connector.py

@@ -22,7 +22,7 @@ def _connect_mongodb(
     The `"mongodb_extra_args"` passed by the user is originally a dictionary, but since `@lru_cache` wrapper only
     accepts hashable parameters, the `"mongodb_extra_args"` should be converted into a frozenset beforehand.
 
-    Parameters:
+    Arguments:
         db_host (str): the database host.
         db_port (int): the database port.
         db_username (str): the database username.

+ 1 - 1
taipy/core/common/_utils.py

@@ -31,7 +31,7 @@ def _retry_repository_operation(exceptions: Tuple, sleep_time: float = 0.2):
     in ``exceptions`` are thrown.
     The number of retries is defined by Config.core.read_entity_retry.
 
-    Parameters:
+    Arguments:
         exceptions (tuple): Tuple of exceptions that trigger a retry attempt.
         sleep_time (float): Time to sleep between retries.
     """

+ 1 - 1
taipy/core/config/core_section.py

@@ -390,7 +390,7 @@ class CoreSection(UniqueSection):
     ) -> "CoreSection":
         """Configure the Orchestrator service.
 
-        Parameters:
+        Arguments:
             root_folder (Optional[str]): Path of the base folder for the taipy application.
                 The default value is "./taipy/"
             storage_folder (str): Folder name used to store user data. The default value is "user_data/". It is used in

+ 14 - 14
taipy/core/config/data_node_config.py

@@ -411,7 +411,7 @@ class DataNodeConfig(Section):
         where all data node configuration objects will find their default
         values when needed.
 
-        Parameters:
+        Arguments:
             storage_type (str): The default storage type for all data node configurations.
                 The possible values are *"pickle"* (the default value), *"csv"*, *"excel"*,
                 *"sql"*, *"mongo_collection"*, *"in_memory"*, *"json"*, *"parquet"*, *"generic"*,
@@ -442,7 +442,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new data node configuration from an existing one.
 
-        Parameters:
+        Arguments:
             source_configuration (DataNodeConfig): The source data node configuration.
             id (str): The unique identifier of the new data node configuration.
             **properties (dict[str, any]): A keyworded variable length list of additional arguments.<br/>
@@ -468,7 +468,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new data node configuration.
             storage_type (Optional[str]): The data node configuration storage type. The possible values
                 are None (which is the default value of *"pickle"*, unless it has been overloaded by the
@@ -523,7 +523,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new CSV data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new CSV data node configuration.
             default_path (Optional[str]): The default path of the CSV file.
             encoding (Optional[str]): The encoding of the CSV file.
@@ -568,7 +568,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new JSON data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new JSON data node configuration.
             default_path (Optional[str]): The default path of the JSON file.
             encoding (Optional[str]): The encoding of the JSON file.
@@ -614,7 +614,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new Parquet data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new Parquet data node configuration.
             default_path (Optional[str]): The default path of the Parquet file.
             engine (Optional[str]): Parquet library to use. Possible values are *"fastparquet"* or
@@ -672,7 +672,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new Excel data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new Excel data node configuration.
             default_path (Optional[str]): The path of the Excel file.
             has_header (Optional[bool]): If True, indicates that the Excel file has a header.
@@ -718,7 +718,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new generic data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new generic data node configuration.
             read_fct (Optional[Callable]): The Python function called to read the data.
             write_fct (Optional[Callable]): The Python function called to write the data.
@@ -762,7 +762,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new *in-memory* data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new in_memory data node configuration.
             default_data (Optional[any]): The default data of the data nodes instantiated from
                 this in_memory data node configuration.
@@ -798,7 +798,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new pickle data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new pickle data node configuration.
             default_path (Optional[str]): The path of the pickle file.
             default_data (Optional[any]): The default data of the data nodes instantiated from
@@ -847,7 +847,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new SQL table data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new SQL data node configuration.
             db_name (str): The database name, or the name of the SQLite database file.
             db_engine (str): The database engine. Possible values are *"sqlite"*, *"mssql"*, *"mysql"*,
@@ -936,7 +936,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new SQL data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new SQL data node configuration.
             db_name (str): The database name, or the name of the SQLite database file.
             db_engine (str): The database engine. Possible values are *"sqlite"*, *"mssql"*, *"mysql"*,
@@ -1027,7 +1027,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new Mongo collection data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new Mongo collection data node configuration.
             db_name (str): The database name.
             collection_name (str): The collection in the database to read from and to write the data to.
@@ -1102,7 +1102,7 @@ class DataNodeConfig(Section):
     ) -> "DataNodeConfig":
         """Configure a new S3 object data node configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new S3 Object data node configuration.
            aws_access_key (str): Amazon Web Services (AWS) ID used to identify the account.
             aws_secret_access_key (str): Amazon Web Services (AWS) access key to authenticate
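
A sketch of one of the storage-specific helpers documented in this file, assuming the conventional `Config.configure_csv_data_node()` entry point (path and identifier are placeholders):

```python
from taipy import Config, Scope

sales_cfg = Config.configure_csv_data_node(
    "sales",                        # unique identifier of the configuration
    default_path="data/sales.csv",  # default path of the CSV file
    encoding="utf-8",               # encoding of the CSV file
    scope=Scope.SCENARIO,           # one data node per scenario
)
```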

+ 1 - 1
taipy/core/config/job_config.py

@@ -101,7 +101,7 @@ class JobConfig(UniqueSection):
     ) -> "JobConfig":
         """Configure job execution.
 
-        Parameters:
+        Arguments:
             mode (Optional[str]): The job execution mode.
                 Possible values are: *"standalone"* or *"development"*.
             max_nb_of_workers (Optional[int, str]): Parameter used only in *"standalone"* mode.
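
For reference, a minimal sketch of the corresponding call (values are placeholders):

```python
from taipy import Config

# Dispatch jobs to up to two worker processes; "development" mode
# would instead execute jobs synchronously in the main process.
Config.configure_job_executions(mode="standalone", max_nb_of_workers=2)
```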

+ 5 - 5
taipy/core/config/scenario_config.py

@@ -135,7 +135,7 @@ class ScenarioConfig(Section):
     def add_comparator(self, dn_config_id: str, comparator: Callable) -> None:
         """Add a comparator to the scenario configuration.
 
-        Parameters:
+        Arguments:
             dn_config_id (str): The data node configuration id to which the comparator
                 will be applied.
             comparator (Callable): The comparator function to be added.
@@ -153,7 +153,7 @@ class ScenarioConfig(Section):
         When a `Scenario^` is instantiated from this configuration, the
         sequence descriptions are used to add new sequences to the scenario.
 
-        Parameters:
+        Arguments:
             sequences (Dict[str, List[TaskConfig]]): Dictionary of sequence descriptions.
         """
         self.sequences.update(sequences)
@@ -161,7 +161,7 @@ class ScenarioConfig(Section):
     def remove_sequences(self, sequence_names: Union[str, List[str]]) -> None:
         """Remove sequence descriptions from the scenario configuration.
 
-        Parameters:
+        Arguments:
             sequence_names (Union[str, List[str]]): The name of the sequence or a list
                 of sequence names.
         """
@@ -289,7 +289,7 @@ class ScenarioConfig(Section):
     ) -> "ScenarioConfig":
         """Configure a new scenario configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of the new scenario configuration.
             task_configs (Optional[List[TaskConfig^]]): The list of task configurations used by this
                 scenario configuration. The default value is None.
@@ -340,7 +340,7 @@ class ScenarioConfig(Section):
         where all scenario configuration objects will find their default
         values when needed.
 
-        Parameters:
+        Arguments:
             task_configs (Optional[List[TaskConfig^]]): The list of task configurations used by this
                 scenario configuration.
             additional_data_node_configs (Optional[List[DataNodeConfig^]]): The list of additional data nodes
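
A minimal sketch combining the `ScenarioConfig` methods touched above (functions and identifiers are placeholders):

```python
from taipy import Config

def build_report(revenue: int) -> str:
    return f"revenue={revenue}"

revenue_cfg = Config.configure_data_node("revenue", default_data=100)
report_cfg = Config.configure_data_node("report")
task_cfg = Config.configure_task("report_task", function=build_report,
                                 input=revenue_cfg, output=report_cfg)
scenario_cfg = Config.configure_scenario("my_scenario", task_configs=[task_cfg])

def compare_revenue(*revenues):
    # Comparator applied to the "revenue" data node across compared scenarios.
    return max(revenues) - min(revenues)

scenario_cfg.add_comparator("revenue", compare_revenue)
scenario_cfg.add_sequences({"reporting": [task_cfg]})
```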

+ 2 - 2
taipy/core/config/task_config.py

@@ -172,7 +172,7 @@ class TaskConfig(Section):
     ) -> "TaskConfig":
         """Configure a new task configuration.
 
-        Parameters:
+        Arguments:
             id (str): The unique identifier of this task configuration.
             function (Callable): The python function called by Taipy to run the task.
             input (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the
@@ -207,7 +207,7 @@ class TaskConfig(Section):
         where all task configuration objects will find their default
         values when needed.
 
-        Parameters:
+        Arguments:
             function (Callable): The python function called by Taipy to run the task.
             input (Optional[Union[DataNodeConfig^, List[DataNodeConfig^]]]): The list of the
                 input data node configurations. This can be a unique data node

+ 1 - 1
taipy/core/data/_file_datanode_mixin.py

@@ -103,7 +103,7 @@ class _FileDataNodeMixin(object):
     def _upload(self, path: str, upload_checker: Optional[Callable[[str, Any], bool]] = None) -> ReasonCollection:
         """Upload a file data to the data node.
 
-        Parameters:
+        Arguments:
             path (str): The path of the file to upload to the data node.
             upload_checker (Optional[Callable[[str, Any], bool]]): A function to check if the upload is allowed.
                 The function takes the title of the upload data and the data itself as arguments and returns

+ 1 - 1
taipy/core/data/csv.py

@@ -119,7 +119,7 @@ class CSVDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
     def write_with_column_names(self, data: Any, columns: Optional[List[str]] = None, job_id: Optional[JobId] = None):
         """Write a selection of columns.
 
-        Parameters:
+        Arguments:
             data (Any): The data to write.
             columns (Optional[List[str]]): The list of column names to write.
             job_id (JobId): An optional identifier of the writer.
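
For illustration, a sketch of the call assuming `csv_dn` is a `CSVDataNode` instance and the data is row-oriented:

```python
# Write two rows, keeping only the "id" and "label" columns.
csv_dn.write_with_column_names(
    [[1, "a"], [2, "b"]],
    columns=["id", "label"],
)
```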

+ 6 - 6
taipy/core/data/data_node.py

@@ -418,7 +418,7 @@ class DataNode(_Entity, _Labeled):
     def append(self, data, job_id: Optional[JobId] = None, **kwargs: Dict[str, Any]):
         """Append some data to this data node.
 
-        Parameters:
+        Arguments:
             data (Any): The data to write to this data node.
             job_id (JobId): An optional identifier of the writer.
             **kwargs (dict[str, any]): Extra information to attach to the edit document
@@ -434,7 +434,7 @@ class DataNode(_Entity, _Labeled):
     def write(self, data, job_id: Optional[JobId] = None, **kwargs: Dict[str, Any]):
         """Write some data to this data node.
 
-        Parameters:
+        Arguments:
             data (Any): The data to write to this data node.
             job_id (JobId): An optional identifier of the writer.
             **kwargs (dict[str, any]): Extra information to attach to the edit document
@@ -450,7 +450,7 @@ class DataNode(_Entity, _Labeled):
     def track_edit(self, **options):
         """Creates and adds a new entry in the edits attribute without writing the data.
 
-        Parameters:
+        Arguments:
             options (dict[str, any]): track `timestamp`, `comments`, `job_id`. The others are user-custom, users can
                 use options to attach any information to an external edit of a data node.
         """
@@ -468,7 +468,7 @@ class DataNode(_Entity, _Labeled):
         Note:
             The data node can be unlocked with the method `(DataNode.)unlock_edit()^`.
 
-        Parameters:
+        Arguments:
             editor_id (Optional[str]): The editor's identifier.
         """
         if editor_id:
@@ -492,7 +492,7 @@ class DataNode(_Entity, _Labeled):
         Note:
             The data node can be locked with the method `(DataNode.)lock_edit()^`.
 
-        Parameters:
+        Arguments:
             editor_id (Optional[str]): The editor's identifier.
         """
         if (
@@ -515,7 +515,7 @@ class DataNode(_Entity, _Labeled):
         If multiple filter operators are provided, filtered data will be joined based on the
         join operator (*AND* or *OR*).
 
-        Parameters:
+        Arguments:
             operators (Union[List[Tuple], Tuple]): A 3-element tuple or a list of 3-element tuples,
                 each is in the form of (key, value, `Operator^`).
             join_operator (JoinOperator^): The operator used to join the multiple filter
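
A sketch of the filtering API described above, assuming `dn` is a `DataNode` holding tabular data and that `Operator` and `JoinOperator` live in `taipy.core.data.operator`:

```python
from taipy.core.data.operator import JoinOperator, Operator

# Keep rows where category == "books" AND price > 10.
filtered = dn.filter(
    [("category", "books", Operator.EQUAL),
     ("price", 10, Operator.GREATER_THAN)],
    JoinOperator.AND,
)
```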

+ 1 - 1
taipy/core/data/excel.py

@@ -122,7 +122,7 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
     def write_with_column_names(self, data: Any, columns: List[str] = None, job_id: Optional[JobId] = None) -> None:
         """Write a set of columns.
 
-        Parameters:
+        Arguments:
             data (Any): The data to write.
             columns (List[str]): The list of column names to write.
             job_id (Optional[JobId]): An optional identifier of the writer.

+ 4 - 4
taipy/core/data/mongo.py

@@ -215,7 +215,7 @@ class MongoCollectionDataNode(DataNode):
     def _write(self, data) -> None:
         """Check data against a collection of types to handle insertion on the database.
 
-        Parameters:
+        Arguments:
             data (Any): the data to write to the database.
         """
         if not isinstance(data, list):
@@ -234,7 +234,7 @@ class MongoCollectionDataNode(DataNode):
         """
         This method will insert data contained in a list of dictionaries into a collection.
 
-        Parameters:
+        Arguments:
             data (List[Dict]): a list of dictionaries
             drop (bool): drop the collection before inserting the data to overwrite the data in the collection.
         """
@@ -252,7 +252,7 @@ class MongoCollectionDataNode(DataNode):
     def _default_decoder(self, document: Dict) -> Any:
         """Decode a Mongo dictionary to a custom document object for reading.
 
-        Parameters:
+        Arguments:
            document (Dict): the document dictionary returned by the Mongo query.
 
         Returns:
@@ -263,7 +263,7 @@ class MongoCollectionDataNode(DataNode):
     def _default_encoder(self, document_object: Any) -> Dict:
         """Encode a custom document object to a dictionary for writing to MongoDB.
 
-        Args:
+        Arguments:
             document_object: the custom document class.
 
         Returns:

+ 2 - 2
taipy/core/data/parquet.py

@@ -168,7 +168,7 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
 
        Keyword arguments passed here overwrite the corresponding values set in the Data Node config.
 
-        Parameters:
+        Arguments:
             data (Any): The data to write.
             job_id (JobId): An optional identifier of the writer.
             **write_kwargs (dict[str, any]): The keyword arguments passed to the function
@@ -196,7 +196,7 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
 
        Keyword arguments passed here overwrite the corresponding values set in the Data Node config.
 
-        Parameters:
+        Arguments:
             **read_kwargs (dict[str, any]): The keyword arguments passed to the function
                 `pandas.read_parquet()`.
         """

+ 1 - 1
taipy/core/job/job.py

@@ -424,7 +424,7 @@ class Job(_Entity, _Labeled):
        The given functions are called on each status change, except for the _submitted_
        status.
 
-        Parameters:
+        Arguments:
             functions: Callables that will be called on each status change.
         """
         functions = list(functions)  # type: ignore

+ 1 - 1
taipy/core/notification/core_event_consumer.py

@@ -56,7 +56,7 @@ class CoreEventConsumerBase(threading.Thread):
     def __init__(self, registration_id: str, queue: SimpleQueue) -> None:
         """Initialize a CoreEventConsumerBase instance.
 
-        Parameters:
+        Arguments:
             registration_id (str): A unique identifier of the registration. You can get a
                 registration id invoking `Notifier.register()^` method.
             queue (SimpleQueue): The queue from which events will be consumed. You can get a

+ 1 - 1
taipy/core/notification/event.py

@@ -132,7 +132,7 @@ def _make_event(
     """Helper function to make an event for this entity with the given `EventOperation^` type.
     In case of `EventOperation.UPDATE^` events, an attribute name and value must be given.
 
-    Parameters:
+    Arguments:
         entity (Any): The entity object to generate an event for.
         operation (EventOperation^): The operation of the event. The possible values are:
             <ul>

+ 4 - 4
taipy/core/notification/notifier.py

@@ -31,7 +31,7 @@ def _publish_event(
     It basically creates an event corresponding to the given arguments
    and sends it using `Notifier.publish(event)`
 
-    Parameters:
+    Arguments:
         entity_type (EventEntityType^)
         operation (EventOperation^)
         entity_id (Optional[str])
@@ -84,7 +84,7 @@ class Notifier:
             )
             ```
 
-        Parameters:
+        Arguments:
             entity_type (Optional[EventEntityType^]): If provided, the listener will
                 be notified for all events related to this entity type. Otherwise,
                 the listener will be notified for events related to all entity types.
@@ -147,7 +147,7 @@ class Notifier:
             Notifier.unregister(registration_id)
             ```
 
-        Parameters:
+        Arguments:
             registration_id (`RegistrationId`): The registration id returned by the `register` method.
         """
         to_remove_registration: Optional[_Registration] = None
@@ -168,7 +168,7 @@ class Notifier:
     def publish(cls, event: Event) -> None:
         """Publish a Taipy application event to all registered listeners whose topic matches the event.
 
-        Parameters:
+        Arguments:
             event (`Event^`): The event to publish.
         """
         for topic, registrations in cls._topics_registrations_list.items():
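
A minimal sketch of the registration lifecycle documented above (the entity type filter is an illustrative choice):

```python
from taipy.core.notification import EventEntityType, EventOperation, Notifier

# Register for scenario creation events; `register` returns the
# registration id together with the queue matching events are pushed to.
registration_id, event_queue = Notifier.register(
    entity_type=EventEntityType.SCENARIO,
    operation=EventOperation.CREATION,
)

event = event_queue.get()  # blocks until a matching event is published

Notifier.unregister(registration_id)
```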

+ 1 - 1
taipy/core/orchestrator.py

@@ -70,7 +70,7 @@ class Orchestrator:
         """Stop the Orchestrator service.
        This function stops the dispatcher and unblocks the Config for updates.
 
-        Parameters:
+        Arguments:
             wait (bool): If True, the method will wait for the dispatcher to stop.
             timeout (Optional[float]): The maximum time to wait. If None, the method will wait indefinitely.
         """

+ 1 - 1
taipy/core/scenario/_scenario_manager.py

@@ -322,7 +322,7 @@ class _ScenarioManager(_Manager[Scenario], _VersionMixin):
         """
         Filter a list of scenarios by a given creation time period.
 
-        Parameters:
+        Arguments:
             created_start_time (Optional[datetime]): Start time of the period. The start time is inclusive.
             created_end_time (Optional[datetime]): End time of the period. The end time is exclusive.
 

+ 13 - 13
taipy/core/scenario/scenario.py

@@ -177,7 +177,7 @@ class Scenario(_Entity, Submittable, _Labeled):
 
         The attribute can be a sequence, a task, or a data node.
 
-        Parameters:
+        Arguments:
             attribute_name (str): The name of the attribute to get.
 
         Returns:
@@ -342,7 +342,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     def has_tag(self, tag: str) -> bool:
         """Indicate if the scenario has a given tag.
 
-        Parameters:
+        Arguments:
             tag (str): The tag to search among the set of scenario's tags.
 
         Returns:
@@ -358,7 +358,7 @@ class Scenario(_Entity, Submittable, _Labeled):
         Note:
             Notification will be available only for jobs created after this subscription.
 
-        Parameters:
+        Arguments:
             callback (Callable[[Scenario^, Job^], None]): The callable function to be called
                 on status change.
             params (Optional[List[Any]]): The parameters to be passed to the _callback_.
@@ -373,7 +373,7 @@ class Scenario(_Entity, Submittable, _Labeled):
         Note:
             The function will continue to be called for ongoing jobs.
 
-        Parameters:
+        Arguments:
             callback (Callable[[Scenario^, Job^], None]): The callable function to unsubscribe.
             params (Optional[List[Any]]): The parameters to be passed to the _callback_.
         """
@@ -393,7 +393,7 @@ class Scenario(_Entity, Submittable, _Labeled):
 
         All the `Task^`s of the scenario will be submitted for execution.
 
-        Parameters:
+        Arguments:
             callbacks (List[Callable]): The list of callable functions to be called on status
                 change.
             force (bool): Force execution even if the data nodes are in cache.
@@ -427,7 +427,7 @@ class Scenario(_Entity, Submittable, _Labeled):
        If the scenario's cycle already has another scenario tagged with _tag_, the other
        scenario will be untagged.
 
-        Parameters:
+        Arguments:
             tag (str): The tag to add to this scenario.
         """
         from ... import core as tp
@@ -437,7 +437,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     def remove_tag(self, tag: str) -> None:
         """Remove a tag from this scenario.
 
-        Parameters:
+        Arguments:
             tag (str): The tag to remove from the set of the scenario's tags.
         """
         from ... import core as tp
@@ -479,7 +479,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     ) -> None:
         """Add a sequence to the scenario.
 
-        Parameters:
+        Arguments:
             name (str): The name of the sequence.
             tasks (Union[List[Task], List[TaskId]]): The list of scenario's tasks to add to the sequence.
             properties (Optional[Dict]): The optional properties of the sequence.
@@ -504,7 +504,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     ) -> None:
         """Update an existing sequence.
 
-        Parameters:
+        Arguments:
             name (str): The name of the sequence to update.
             tasks (Union[List[Task], List[TaskId]]): The new list of scenario's tasks.
             properties (Optional[Dict]): The new properties of the sequence.
@@ -525,7 +525,7 @@ class Scenario(_Entity, Submittable, _Labeled):
         Note:
             To provide properties and subscribers for the sequences, use `Scenario.add_sequence^` instead.
 
-        Parameters:
+        Arguments:
             sequences (Dict[str, Union[List[Task], List[TaskId]]]):
                 A dictionary containing sequences to add. Each key is a sequence name, and the value must
                 be a list of the scenario tasks.
@@ -545,7 +545,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     def remove_sequence(self, name: str) -> None:
         """Remove a sequence from the scenario.
 
-        Parameters:
+        Arguments:
             name (str): The name of the sequence to remove.
         """
         seq_id = self.sequences[name].id
@@ -557,7 +557,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     def remove_sequences(self, sequence_names: List[str]) -> None:
         """Remove multiple sequences from the scenario.
 
-        Parameters:
+        Arguments:
             sequence_names (List[str]): A list of sequence names to remove.
         """
         _sequences = _Reloader()._reload(self._MANAGER_NAME, self)._sequences
@@ -576,7 +576,7 @@ class Scenario(_Entity, Submittable, _Labeled):
     def rename_sequence(self, old_name, new_name) -> None:
         """Rename a scenario sequence.
 
-        Parameters:
+        Arguments:
             old_name (str): The current name of the sequence to rename.
             new_name (str): The new name of the sequence.
 

+ 4 - 4
taipy/core/sequence/sequence.py

@@ -163,7 +163,7 @@ class Sequence(_Entity, Submittable, _Labeled):
 
         The attribute can be a task or a data node.
 
-        Parameters:
+        Arguments:
             attribute_name (str): The attribute name.
 
         Returns:
@@ -264,7 +264,7 @@ class Sequence(_Entity, Submittable, _Labeled):
         Note:
             Notification will be available only for jobs created after this subscription.
 
-        Parameters:
+        Arguments:
             callback (Callable[[Sequence^, Job^], None]): The callable function to be called on
                 status change.
             params (Optional[List[Any]]): The parameters to be passed to the _callback_.
@@ -279,7 +279,7 @@ class Sequence(_Entity, Submittable, _Labeled):
         Note:
             The function will continue to be called for ongoing jobs.
 
-        Parameters:
+        Arguments:
             callback (Callable[[Sequence^, Job^], None]): The callable function to unsubscribe.
             params (Optional[List[Any]]): The parameters to be passed to the _callback_.
         """
@@ -299,7 +299,7 @@ class Sequence(_Entity, Submittable, _Labeled):
 
         All the `Task^`s of the sequence will be submitted for execution.
 
-        Parameters:
+        Arguments:
             callbacks (List[Callable]): The list of callable functions to be called on status
                 change.
             force (bool): Force execution even if the data nodes are in cache.

+ 27 - 27
taipy/core/taipy.py

@@ -61,7 +61,7 @@ def set(entity: Union[DataNode, Task, Sequence, Scenario, Cycle, Submission]):
 
     This function allows you to save or update an entity in Taipy.
 
-    Parameters:
+    Arguments:
         entity (Union[DataNode^, Task^, Sequence^, Scenario^, Cycle^, Submission^]): The
             entity to save or update.
     """
@@ -231,7 +231,7 @@ def submit(
     If the entity is a sequence or a scenario, all the tasks of the entity are
     submitted for execution.
 
-    Parameters:
+    Arguments:
         entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to submit.
        force (bool): If True, the execution is forced even for skippable tasks.
         wait (bool): Wait for the orchestrated jobs created from the submission to be finished
@@ -302,7 +302,7 @@ def exists(
     `DataNodeId`, `SequenceId`, `ScenarioId`, `JobId`, `CycleId`, `SubmissionId`, and string
     representations.
 
-    Parameters:
+    Arguments:
         entity_id (Union[DataNodeId, TaskId, SequenceId, ScenarioId, JobId, CycleId, SubmissionId, str]): The
             identifier of the entity to check for existence.
 
@@ -377,7 +377,7 @@ def get(
     `Task^`, `DataNode^`, `Sequence^`, `Job^`, `Cycle^`, `Submission^`, or `Scenario^`.
 
 
-    Parameters:
+    Arguments:
         entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, CycleId, str]):
             The identifier of the entity to retrieve.<br/>
             It should conform to the identifier pattern of one of the entities (`Task^`, `DataNode^`,
@@ -424,7 +424,7 @@ def is_deletable(entity: Union[Scenario, Job, Submission, ScenarioId, JobId, Sub
    This function determines whether a scenario, a job, or a submission can be safely
     deleted without causing conflicts or issues.
 
-    Parameters:
+    Arguments:
         entity (Union[Scenario, Job, Submission, ScenarioId, JobId, SubmissionId]): The scenario,
             job or submission to check.
 
@@ -465,7 +465,7 @@ def delete(entity_id: Union[TaskId, DataNodeId, SequenceId, ScenarioId, JobId, C
       The submission can only be deleted if the execution has been finished.
     - If a `JobId` is provided, the job entity can only be deleted if the execution has been finished.
 
-    Parameters:
+    Arguments:
         entity_id (Union[TaskId, DataNodeId, SequenceId, ScenarioId, SubmissionId, JobId, CycleId]):
             The identifier of the entity to delete.
 
@@ -506,7 +506,7 @@ def get_scenarios(
     list contains scenarios that belong to the specified *cycle* and also
     have the specified *tag*.
 
-    Parameters:
+    Arguments:
         cycle (Optional[Cycle^]): The optional `Cycle^` to filter scenarios by.
         tag (Optional[str]): The optional tag to filter scenarios by.
         is_sorted (bool): If True, sort the output list of scenarios using the sorting key.
@@ -546,7 +546,7 @@ def get_scenarios(
 def get_primary(cycle: Cycle) -> Optional[Scenario]:
     """Retrieve the primary scenario associated with a cycle.
 
-    Parameters:
+    Arguments:
         cycle (Cycle^): The cycle for which to retrieve the primary scenario.
 
     Returns:
@@ -565,7 +565,7 @@ def get_primary_scenarios(
 ) -> List[Scenario]:
     """Retrieve a list of all primary scenarios.
 
-    Parameters:
+    Arguments:
         is_sorted (bool): If True, sort the output list of scenarios using the sorting key.
             The default value is False.
         descending (bool): If True, sort the output list of scenarios in descending order.
@@ -597,7 +597,7 @@ def is_promotable(scenario: Union[Scenario, ScenarioId]) -> ReasonCollection:
     This function checks whether the given scenario is eligible to be promoted
     as a primary scenario.
 
-    Parameters:
+    Arguments:
         scenario (Union[Scenario, ScenarioId]): The scenario to be evaluated for promotion.
 
     Returns:
@@ -614,7 +614,7 @@ def set_primary(scenario: Scenario):
     If the cycle already has a primary scenario, that scenario is demoted and is
     no longer considered the primary scenario for its cycle.
 
-    Parameters:
+    Arguments:
         scenario (Scenario^): The scenario to promote as the new _primary_ scenario.
     """
     return _ScenarioManagerFactory._build_manager()._set_primary(scenario)
@@ -625,7 +625,7 @@ def tag(scenario: Scenario, tag: str):
 
     This function adds a user-defined tag to the specified scenario.
 
-    Parameters:
+    Arguments:
         scenario (Scenario^): The scenario to which the tag will be added.
         tag (str): The tag to apply to the scenario.
     """
@@ -638,7 +638,7 @@ def untag(scenario: Scenario, tag: str):
     This function removes a specified tag from the given scenario. If the scenario does
     not have the specified tag, it has no effect.
 
-    Parameters:
+    Arguments:
         scenario (Scenario^): The scenario from which the tag will be removed.
         tag (str): The tag to remove from the scenario.
     """
@@ -651,7 +651,7 @@ def compare_scenarios(*scenarios: Scenario, data_node_config_id: Optional[str] =
    You can specify which data node config identifier the comparison should be
    performed on.
 
-    Parameters:
+    Arguments:
         *scenarios (*Scenario^): The list of the scenarios to compare.
         data_node_config_id (Optional[str]): The config identifier of the DataNode to perform
             the comparison on. <br/>
@@ -685,7 +685,7 @@ def subscribe_scenario(
     The subscription is applied to all jobs created for the execution of _scenario_.
     If no scenario is provided, the subscription applies to all scenarios.
 
-    Parameters:
+    Arguments:
         callback (Callable[[Scenario^, Job^], None]): The function to be called on
             status change.
         params (Optional[List[Any]]): The parameters to be passed to the _callback_.
@@ -706,7 +706,7 @@ def unsubscribe_scenario(
 
     If no scenario is provided, the subscription is removed for all scenarios.
 
-    Parameters:
+    Arguments:
         callback (Callable[[Scenario^, Job^], None]): The function to unsubscribe from.
         params (Optional[List[Any]]): The parameters to be passed to the callback.
         scenario (Optional[Scenario]): The scenario to unsubscribe from. If None, it
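
A subscription round-trip matching the callback signature documented above; the callback name is hypothetical:

```python
import taipy as tp

def on_status_change(scenario, job):
    # Invoked on every status change of a job created for the scenario.
    print(f"{job.id} -> {job.status}")

tp.subscribe_scenario(on_status_change)    # no scenario given: applies to all scenarios
# ... submit scenarios here ...
tp.unsubscribe_scenario(on_status_change)  # stop receiving status changes
```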
@@ -725,7 +725,7 @@ def subscribe_sequence(
 
     The subscription is applied to all jobs created for the execution of _sequence_.
 
-    Parameters:
+    Arguments:
         callback (Callable[[Sequence^, Job^], None]): The callable function to be called on
             status change.
         params (Optional[List[Any]]): The parameters to be passed to the _callback_.
@@ -743,7 +743,7 @@ def unsubscribe_sequence(
 ) -> None:
     """Unsubscribe a function that is called when the status of a Job changes.
 
-    Parameters:
+    Arguments:
         callback (Callable[[Sequence^, Job^], None]): The callable function to be called on
             status change.
         params (Optional[List[Any]]): The parameters to be passed to the _callback_.
@@ -789,7 +789,7 @@ def delete_job(job: Job, force: Optional[bool] = False):
     This function deletes the specified job. If the job is not completed and
     *force* is not set to True, a `JobNotDeletedException^` may be raised.
 
-    Parameters:
+    Arguments:
         job (Job^): The job to delete.
         force (Optional[bool]): If True, forces the deletion of _job_, even
             if it is not completed yet.
@@ -810,7 +810,7 @@ def cancel_job(job: Union[str, Job]):
 
     This function cancels the specified job and sets the status of any subsequent jobs to ABANDONED.
 
-    Parameters:
+    Arguments:
         job (Job^): The job to cancel.
     """
     _JobManagerFactory._build_manager()._cancel(job)
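
A job-housekeeping sketch, assuming `job` is an existing `Job` instance:

```python
import taipy as tp

tp.cancel_job(job)              # cancels the job; subsequent jobs become ABANDONED
tp.delete_job(job, force=True)  # force=True avoids JobNotDeletedException on unfinished jobs
```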
@@ -821,7 +821,7 @@ def get_latest_job(task: Task) -> Optional[Job]:
 
     This function retrieves the latest job associated with a task.
 
-    Parameters:
+    Arguments:
         task (Task^): The task to retrieve the latest job from.
 
     Returns:
@@ -835,7 +835,7 @@ def get_latest_submission(entity: Union[Scenario, Sequence, Task]) -> Optional[S
 
     This function retrieves the latest submission associated with a scenario, sequence or task.
 
-    Parameters:
+    Arguments:
         entity (Union[Scenario^, Sequence^, Task^]): The scenario, sequence or task to
             retrieve the latest submission from.
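
A retrieval sketch, assuming `task` and `scenario` are existing entities:

```python
import taipy as tp

latest_job = tp.get_latest_job(task)                    # None if the task never ran
latest_submission = tp.get_latest_submission(scenario)  # sequences and tasks work too
```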
 
@@ -891,7 +891,7 @@ def create_scenario(
     If the scenario belongs to a cycle, the cycle (corresponding to the _creation_date_
     and the configuration frequency attribute) is created if it does not exist yet.
 
-    Parameters:
+    Arguments:
         config (ScenarioConfig^): The scenario configuration used to create a new scenario.
         creation_date (Optional[datetime.datetime]): The creation date of the scenario.
             If None, the current date time is used.
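
A creation sketch; `scenario_cfg` is a hypothetical `ScenarioConfig` built with `Config.configure_scenario()`:

```python
import taipy as tp
from datetime import datetime

# With a frequency set on the config, creation_date decides the enclosing cycle.
scenario = tp.create_scenario(scenario_cfg, creation_date=datetime(2024, 1, 15))
```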
@@ -915,7 +915,7 @@ def create_global_data_node(config: DataNodeConfig) -> DataNode:
     This function checks and locks the configuration, manages the application's version,
     and creates the new data node from the data node configuration provided.
 
-    Parameters:
+    Arguments:
         config (DataNodeConfig^): The data node configuration. It must have a `GLOBAL` scope.
 
     Returns:
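
A sketch of the GLOBAL-scope requirement; the `"constants"` config id is hypothetical:

```python
import taipy as tp
from taipy import Config, Scope

global_cfg = Config.configure_data_node("constants", scope=Scope.GLOBAL)
constants_node = tp.create_global_data_node(global_cfg)  # rejected unless scope is GLOBAL
```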
@@ -940,7 +940,7 @@ def clean_all_entities(version_number: str) -> bool:
     """Deletes all entities associated with the specified version.
     This function cleans all entities, including jobs, submissions, scenarios, cycles, sequences, tasks, and data nodes.
 
-    Parameters:
+    Arguments:
         version_number (str): The version number of the entities to be deleted.<br/>
             - If the specified version does not exist, the operation will be aborted, and False will be returned.
 
@@ -975,7 +975,7 @@ def get_parents(
 ) -> Dict[str, Set[_Entity]]:
     """Get the parents of an entity from itself or its identifier.
 
-    Parameters:
+    Arguments:
         entity (Union[TaskId, DataNodeId, SequenceId, Task, DataNode, Sequence]): The entity,
             or its identifier, to get the parents of.
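
A lookup sketch, assuming `data_node` is an existing `DataNode`; the same call accepts a raw identifier:

```python
import taipy as tp

parents = tp.get_parents(data_node)  # maps entity types to sets of parent entities
for entity_type, entities in parents.items():
    print(entity_type, len(entities))
```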
 
@@ -1054,7 +1054,7 @@ def get_entities_by_config_id(
 ) -> Union[List, List[Task], List[DataNode], List[Sequence], List[Scenario]]:
     """Get the entities by its config id.
 
-    Parameters:
+    Arguments:
         config_id (str): The config id of the entities.
 
     Returns:
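
A lookup sketch; `"my_task_config"` is a hypothetical config id:

```python
import taipy as tp

entities = tp.get_entities_by_config_id("my_task_config")  # every entity built from that config
```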

+ 2 - 2
taipy/core/task/task.py

@@ -28,7 +28,7 @@ from .task_id import TaskId
 
 
 class Task(_Entity, _Labeled):
-    """Hold a user function that will be executed, its parameters and the results.
+    """Hold a user function that will be executed, its parameters, and the results.
 
     A `Task` brings together the user code as a function, and the inputs and outputs
     as data nodes (instances of the `DataNode^` class).
@@ -245,7 +245,7 @@ class Task(_Entity, _Labeled):
     ) -> Submission:
         """Submit the task for execution.
 
-        Parameters:
+        Arguments:
             callbacks (List[Callable]): The list of callable functions to be called on status
                 change.
             force (bool): Force execution even if the data nodes are in cache.
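
A submission sketch, assuming `task` is an existing `Task` instance:

```python
submission = task.submit(force=True)  # re-run even if the output data nodes are cached
print(submission.submission_status)
```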

+ 1 - 1
taipy/gui/_gui_section.py

@@ -50,7 +50,7 @@ class _GuiSection(UniqueSection):
     def _configure(**properties) -> "_GuiSection":
         """Configure the Graphical User Interface.
 
-        Parameters:
+        Arguments:
             **properties (dict[str, any]): Keyword arguments that configure the behavior of the `Gui^` instances.<br/>
                 Please refer to the gui config section
                 [page](../../../../../../userman/advanced_features/configuration/gui-config.md#configuring-the-gui-instance)
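
Unique sections like this one are typically reached through `Config`; a minimal sketch, assuming the section is exposed as `Config.configure_gui()` and using two illustrative properties:

```python
from taipy import Config

Config.configure_gui(port=8080, debug=False)  # illustrative property values
```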

+ 2 - 2
taipy/rest/api/resources/cycle.py

@@ -106,7 +106,7 @@ class CycleResource(Resource):
           When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_READER` role.
 
-      parameters:
+      arguments:
         - in: path
           name: cycle_id
           schema:
@@ -179,7 +179,7 @@ class CycleResource(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_EDITOR` role.
 
-      parameters:
+      arguments:
         - in: path
           name: cycle_id
           schema:

+ 5 - 5
taipy/rest/api/resources/datanode.py

@@ -157,7 +157,7 @@ class DataNodeResource(Resource):
           When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_READER` role.
 
-      parameters:
+      arguments:
         - in: path
           name: datanode_id
           schema:
@@ -235,7 +235,7 @@ class DataNodeResource(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_EDITOR` role.
 
-      parameters:
+      arguments:
         - in: path
           name: datanode_id
           schema:
@@ -433,7 +433,7 @@ class DataNodeList(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_EDITOR` role.
 
-      parameters:
+      arguments:
         - in: query
           name: config_id
           schema:
@@ -549,7 +549,7 @@ class DataNodeReader(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_READER` role.
 
-      parameters:
+      arguments:
         - in: path
           name: datanode_id
           schema:
@@ -625,7 +625,7 @@ class DataNodeWriter(Resource):
            http://localhost:5000/api/v1/datanodes/DATANODE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9/write
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: datanode_id
           schema:

+ 4 - 4
taipy/rest/api/resources/job.py

@@ -54,7 +54,7 @@ class JobResource(Resource):
           curl -X GET http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: job_id
           schema:
@@ -87,7 +87,7 @@ class JobResource(Resource):
           curl -X DELETE http://localhost:5000/api/v1/jobs/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: job_id
           schema:
@@ -174,7 +174,7 @@ class JobList(Resource):
           curl -X POST http://localhost:5000/api/v1/jobs?task_id=TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: query
           name: task_id
           schema:
@@ -255,7 +255,7 @@ class JobExecutor(Resource):
           curl -X POST http://localhost:5000/api/v1/jobs/cancel/JOB_my_task_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: job_id
           schema:

+ 4 - 4
taipy/rest/api/resources/scenario.py

@@ -118,7 +118,7 @@ class ScenarioResource(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_READER` role.
 
-        parameters:
+        arguments:
         - in: path
           name: scenario_id
           schema:
@@ -193,7 +193,7 @@ class ScenarioResource(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_EDITOR` role.
 
-      parameters:
+      arguments:
         - in: path
           name: scenario_id
           schema:
@@ -393,7 +393,7 @@ class ScenarioList(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_EDITOR` role.
 
-      parameters:
+      arguments:
         - in: query
           name: config_id
           schema:
@@ -498,7 +498,7 @@ class ScenarioExecutor(Resource):
             When the authorization feature is activated (available in Taipy Enterprise edition only), this endpoint
             requires the `TAIPY_EXECUTOR` role.
 
-      parameters:
+      arguments:
         - in: path
           name: scenario_id
           schema:

+ 4 - 4
taipy/rest/api/resources/sequence.py

@@ -55,7 +55,7 @@ class SequenceResource(Resource):
           curl -X GET http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: sequence_id
           schema:
@@ -88,7 +88,7 @@ class SequenceResource(Resource):
           curl -X DELETE http://localhost:5000/api/v1/sequences/SEQUENCE_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: sequence_id
           schema:
@@ -176,7 +176,7 @@ class SequenceList(Resource):
            http://localhost:5000/api/v1/sequences
         ```
 
-      parameters:
+      arguments:
         - in: query
           name: scenario_id
           schema:
@@ -261,7 +261,7 @@ class SequenceExecutor(Resource):
           curl -X POST http://localhost:5000/api/v1/sequences/submit/SEQUENCE_my_config_7575-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: sequence_id
           schema:
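
The curl call above, rephrased in Python with the third-party `requests` package (assumed installed); the sequence id is the placeholder from the example:

```python
import requests

url = (
    "http://localhost:5000/api/v1/sequences/submit/"
    "SEQUENCE_my_config_7575-4e09-4e00-958d-e352ee426cc9"
)
response = requests.post(url, timeout=10)
print(response.status_code, response.json())
```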

+ 4 - 4
taipy/rest/api/resources/task.py

@@ -54,7 +54,7 @@ class TaskResource(Resource):
           curl -X GET http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: task_id
           schema:
@@ -86,7 +86,7 @@ class TaskResource(Resource):
         ```shell
           curl -X DELETE http://localhost:5000/api/v1/tasks/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
-      parameters:
+      arguments:
         - in: path
           name: task_id
           schema:
@@ -172,7 +172,7 @@ class TaskList(Resource):
         ```shell
           curl -X POST http://localhost:5000/api/v1/tasks?config_id=my_task_config
         ```
-      parameters:
+      arguments:
         - in: query
           name: config_id
           schema:
@@ -247,7 +247,7 @@ class TaskExecutor(Resource):
           curl -X POST http://localhost:5000/api/v1/tasks/submit/TASK_my_config_75750ed8-4e09-4e00-958d-e352ee426cc9
         ```
 
-      parameters:
+      arguments:
         - in: path
           name: task_id
           schema:

+ 1 - 1
taipy/rest/rest.py

@@ -41,7 +41,7 @@ class Rest:
         """
         Start a REST API server. This method is blocking.
 
-        Parameters:
+        Arguments:
             **kwargs: Options to provide to the application server.
         """
         self._app.run(**kwargs)
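
A minimal sketch of starting the server, assuming `Rest` is importable from the top-level `taipy` package; host and port are illustrative:

```python
from taipy import Rest

rest = Rest()
rest.run(host="0.0.0.0", port=5000)  # kwargs are forwarded to the Flask server; blocks
```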