
Merge pull request #1445 from Avaiga/fix/#1199-support-pandas-series-signature-in-_convert_data_to_dataframe-method

Fix/#1199 - Support pandas series signature in _convert_data_to_dataframe()
Đỗ Trường Giang, 10 months ago
parent
commit
d68654c833

+ 2 - 2
taipy/core/data/csv.py

@@ -188,7 +188,7 @@ class CSVDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
             )
         else:
             self._convert_data_to_dataframe(exposed_type, data).to_csv(
-                self._path, index=False, encoding=self.properties[self.__ENCODING_KEY], header=None
+                self._path, index=False, encoding=self.properties[self.__ENCODING_KEY], header=False
             )

     def write_with_column_names(self, data: Any, columns: Optional[List[str]] = None, job_id: Optional[JobId] = None):
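The csv.py change above replaces `header=None` with `header=False`. As a side note (not part of the PR), `False` is the documented boolean form for suppressing the header row in `to_csv`, and it is accepted whether the converted data is a `DataFrame` or a `Series`, which matters now that `_convert_data_to_dataframe()` may return a `Series`. A minimal sketch with made-up file names:

```python
# Minimal sketch, not Taipy code: header=False is the documented way to skip
# the header row, and both DataFrame.to_csv and Series.to_csv accept it.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
s = pd.Series([1, 2, 3], name="a")

df.to_csv("df_no_header.csv", index=False, header=False)
s.to_csv("series_no_header.csv", index=False, header=False)
```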
@@ -201,6 +201,6 @@ class CSVDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         """
         """
         df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
         df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
         if columns and isinstance(df, pd.DataFrame):
         if columns and isinstance(df, pd.DataFrame):
-            df.columns = columns
+            df.columns = pd.Index(columns, dtype="object")
         df.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY])
         df.to_csv(self._path, index=False, encoding=self.properties[self.__ENCODING_KEY])
         self.track_edit(timestamp=datetime.now(), job_id=job_id)
         self.track_edit(timestamp=datetime.now(), job_id=job_id)
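Wrapping the column list in an explicit `pd.Index(columns, dtype="object")` rather than assigning the raw list appears to be about making the label dtype explicit (and about satisfying type checkers, since `DataFrame.columns` is an `Index`). A small standalone illustration, not taken from the PR:

```python
# Minimal sketch: an explicit object-dtype Index keeps the column labels
# exactly as given instead of letting pandas infer a dtype for them.
import pandas as pd

df = pd.DataFrame([[1, 2], [3, 4]])
df.columns = pd.Index(["first", "second"], dtype="object")
print(df.columns.dtype)  # object
print(list(df.columns))  # ['first', 'second']
```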

+ 1 - 1
taipy/core/data/excel.py

@@ -301,7 +301,7 @@ class ExcelDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
                 if columns:
                     data[key].columns = columns

-                df.to_excel(writer, key, index=False, header=self.properties[self._HAS_HEADER_PROPERTY] or None)
+                df.to_excel(writer, key, index=False, header=self.properties[self._HAS_HEADER_PROPERTY] or False)

     def _write(self, data: Any):
         if isinstance(data, Dict):
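Same pattern as in csv.py: when the configured has-header property is falsy, `to_excel` now receives the explicit boolean `False` instead of `None`. A rough sketch of the call shape, assuming openpyxl is installed; `has_header` and the output path are stand-ins, not Taipy code:

```python
# Minimal sketch, not Taipy code: pass an explicit boolean to header= rather
# than relying on None being falsy.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
has_header = False  # stands in for self.properties[self._HAS_HEADER_PROPERTY]

with pd.ExcelWriter("out.xlsx") as writer:  # requires openpyxl
    df.to_excel(writer, sheet_name="data", index=False, header=has_header or False)
```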

+ 4 - 4
taipy/core/data/parquet.py

@@ -214,10 +214,10 @@ class ParquetDataNode(DataNode, _FileDataNodeMixin, _TabularDataNodeMixin):
         }
         kwargs.update(self.properties[self.__WRITE_KWARGS_PROPERTY])
         kwargs.update(write_kwargs)
-        if isinstance(data, pd.Series):
-            df = pd.DataFrame(data)
-        else:
-            df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
+
+        df = self._convert_data_to_dataframe(self.properties[self._EXPOSED_TYPE_PROPERTY], data)
+        if isinstance(df, pd.Series):
+            df = pd.DataFrame(df)

         # Ensure that the columns are strings, otherwise writing will fail with pandas 1.3.5
         df.columns = df.columns.astype(str)
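The parquet.py change reorders the logic: `_convert_data_to_dataframe()` runs first for every exposed type, and only afterwards is a `Series` result promoted to a one-column `DataFrame`, so the string-column fix and the parquet write always operate on a `DataFrame`. A self-contained sketch of that flow, with the converter call faked by a plain `Series` and an illustrative output path:

```python
# Minimal sketch, not Taipy code: convert first, then promote a Series result
# to a one-column DataFrame before fixing column names and writing parquet.
import pandas as pd

converted = pd.Series([1, 2, 3], name=0)  # stand-in for _convert_data_to_dataframe(...)
if isinstance(converted, pd.Series):
    converted = pd.DataFrame(converted)

converted.columns = converted.columns.astype(str)  # pandas 1.3.5 needs string column names
converted.to_parquet("out.parquet")  # requires pyarrow or fastparquet
```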

+ 9 - 3
taipy/core/data/sql_table.py

@@ -10,7 +10,7 @@
 # specific language governing permissions and limitations under the License.

 from datetime import datetime, timedelta
-from typing import Any, Dict, List, Optional, Set
+from typing import Any, Dict, List, Optional, Set, Union

 import pandas as pd
 from sqlalchemy import MetaData, Table
@@ -146,8 +146,14 @@ class SQLTableDataNode(_AbstractSQLDataNode):
         connection.execute(table.insert(), data)

     @classmethod
-    def __insert_dataframe(cls, df: pd.DataFrame, table: Any, connection: Any, delete_table: bool) -> None:
-        cls.__insert_dicts(df.to_dict(orient="records"), table, connection, delete_table)
+    def __insert_dataframe(
+        cls, df: Union[pd.DataFrame, pd.Series], table: Any, connection: Any, delete_table: bool
+    ) -> None:
+        if isinstance(df, pd.Series):
+            data = [df.to_dict()]
+        elif isinstance(df, pd.DataFrame):
+            data = df.to_dict(orient="records")
+        cls.__insert_dicts(data, table, connection, delete_table)

     @classmethod
     def __delete_all_rows(cls, table: Any, connection: Any, delete_table: bool) -> None:
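With this change, the private helper accepts a `Series` as well as a `DataFrame`: a `Series` becomes a single row dict, a `DataFrame` becomes one dict per row, and both feed the existing `__insert_dicts` path. A self-contained sketch of just that branching (the helper name and the `TypeError` fallback are assumptions, not the PR's code):

```python
# Minimal sketch, not the actual Taipy helpers: build the list of row dicts
# that the SQL insert path expects from either a Series or a DataFrame.
import pandas as pd

def rows_for_insert(data):
    if isinstance(data, pd.Series):
        return [data.to_dict()]                # a single row
    elif isinstance(data, pd.DataFrame):
        return data.to_dict(orient="records")  # one dict per row
    raise TypeError("expected a pandas Series or DataFrame")

print(rows_for_insert(pd.Series({"id": 1, "name": "a"})))
print(rows_for_insert(pd.DataFrame({"id": [1, 2], "name": ["a", "b"]})))
```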