# Copyright 2021-2025 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

import dataclasses
import os
import pathlib
import re
import uuid
from datetime import datetime, timedelta
from time import sleep

import freezegun
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal

from taipy import Scope
from taipy.common.config import Config
from taipy.common.config.exceptions.exceptions import InvalidConfigurationId
from taipy.core.common._utils import _normalize_path
from taipy.core.data._data_manager import _DataManager
from taipy.core.data._data_manager_factory import _DataManagerFactory
from taipy.core.data.csv import CSVDataNode
from taipy.core.data.data_node_id import DataNodeId
from taipy.core.exceptions.exceptions import InvalidExposedType
from taipy.core.reason import NoFileToDownload, NotAFile

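# Autouse fixture: some tests below write to data_sample/temp.csv next to this file;
# remove it after each test so runs stay independent of each other.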
@pytest.fixture(scope="function", autouse=True)
def cleanup():
    yield
    path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.csv")
    if os.path.isfile(path):
        os.remove(path)

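# A minimal custom class, used as the `exposed_type` target in test_create below.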
@dataclasses.dataclass
class MyCustomObject:
    id: int
    integer: int
    text: str

class TestCSVDataNode:
    def test_create(self):
        default_path = "data/node/path"
        csv_dn_config = Config.configure_csv_data_node(
            id="foo_bar", default_path=default_path, has_header=False, name="super name"
        )
        dn = _DataManagerFactory._build_manager()._create(csv_dn_config, None, None)
        assert isinstance(dn, CSVDataNode)
        assert dn.storage_type() == "csv"
        assert dn.config_id == "foo_bar"
        assert dn.name == "super name"
        assert dn.scope == Scope.SCENARIO
        assert dn.id is not None
        assert dn.owner_id is None
        assert dn.last_edit_date is None
        assert dn.job_ids == []
        assert not dn.is_ready_for_reading
        assert dn.path == default_path
        assert dn.properties["has_header"] is False
        assert dn.properties["exposed_type"] == "pandas"

        csv_dn_config = Config.configure_csv_data_node(
            id="foo", default_path=default_path, has_header=True, exposed_type=MyCustomObject
        )
        dn = _DataManagerFactory._build_manager()._create(csv_dn_config, None, None)
        assert dn.storage_type() == "csv"
        assert dn.config_id == "foo"
        assert dn.properties["has_header"] is True
        assert dn.properties["exposed_type"] == MyCustomObject

        with pytest.raises(InvalidConfigurationId):
            CSVDataNode(
                "foo bar", Scope.SCENARIO, properties={"path": default_path, "has_header": False, "name": "super name"}
            )

    def test_modin_deprecated_in_favor_of_pandas(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        # Create a CSVDataNode with the deprecated "modin" exposed_type: it should fall back to "pandas"
        csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
        assert csv_data_node_as_modin.properties["exposed_type"] == "pandas"
        data_modin = csv_data_node_as_modin.read()
        assert isinstance(data_modin, pd.DataFrame)

    def test_get_user_properties(self, csv_file):
        dn_1 = CSVDataNode("dn_1", Scope.SCENARIO, properties={"path": "data/node/path"})
        assert dn_1._get_user_properties() == {}

        dn_2 = CSVDataNode(
            "dn_2",
            Scope.SCENARIO,
            properties={
                "exposed_type": "numpy",
                "default_data": "foo",
                "default_path": csv_file,
                "has_header": False,
                "foo": "bar",
            },
        )

        # exposed_type, default_data, default_path, path, and has_header are filtered out
        assert dn_2._get_user_properties() == {"foo": "bar"}

    def test_new_csv_data_node_with_existing_file_is_ready_for_reading(self):
        not_ready_dn_cfg = Config.configure_data_node("not_ready_data_node_config_id", "csv", path="NOT_EXISTING.csv")
        not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg]
        assert not not_ready_dn.is_ready_for_reading

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        ready_dn_cfg = Config.configure_data_node("ready_data_node_config_id", "csv", path=path)
        ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg]
        assert ready_dn.is_ready_for_reading

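    # Without default_data no file is created; with default_data the file is written eagerly
    # at the default location `<storage_folder>csvs/<dn.id>.csv`.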
    @pytest.mark.parametrize(
        ["properties", "exists"],
        [
            ({}, False),
            ({"default_data": ["foo", "bar"]}, True),
        ],
    )
    def test_create_with_default_data(self, properties, exists):
        dn = CSVDataNode("foo", Scope.SCENARIO, DataNodeId(f"dn_id_{uuid.uuid4()}"), properties=properties)
        assert dn.path == f"{Config.core.storage_folder}csvs/{dn.id}.csv"
        assert os.path.exists(dn.path) is exists

    def test_set_path(self):
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.csv"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        assert dn.path == "foo.csv"
        dn.path = "bar.csv"
        assert dn.path == "bar.csv"

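    # After repointing `path` to a file that does not exist yet, read() raises until
    # something is written to the new location.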
    def test_read_write_after_modify_path(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": path})
        _DataManagerFactory._build_manager()._repository._save(dn)
        read_data = dn.read()
        assert read_data is not None
        dn.path = new_path
        with pytest.raises(FileNotFoundError):
            dn.read()
        dn.write(read_data)
        assert dn.read().equals(read_data)

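    # The next batch of tests checks that `exposed_type` accepts the string "pandas" as well
    # as the pd.DataFrame / np.ndarray classes, however they are imported or aliased, and
    # that read() returns the matching type.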
    def test_pandas_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        assert isinstance(dn.read(), pd.DataFrame)

    def test_pandas_dataframe_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": pd.DataFrame})
        assert isinstance(dn.read(), pd.DataFrame)

    def test_pandas_dataframe_exposed_type_a(self):
        import pandas

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": pandas.DataFrame})
        assert isinstance(dn.read(), pandas.DataFrame)

    def test_pandas_dataframe_exposed_type_b(self):
        from pandas import DataFrame

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": DataFrame})
        assert isinstance(dn.read(), DataFrame)

    def test_pandas_dataframe_exposed_type_c(self):
        from pandas import DataFrame as DF

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": DF})
        assert isinstance(dn.read(), DF)

    def test_numpy_ndarray_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": np.ndarray})
        assert isinstance(dn.read(), np.ndarray)

    def test_numpy_ndarray_exposed_type_a(self):
        import numpy

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": numpy.ndarray})
        assert isinstance(dn.read(), numpy.ndarray)

    def test_numpy_ndarray_exposed_type_b(self):
        from numpy import ndarray

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": ndarray})
        assert isinstance(dn.read(), ndarray)

    def test_numpy_ndarray_exposed_type_c(self):
        from numpy import ndarray as nd_array

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": nd_array})
        assert isinstance(dn.read(), nd_array)

    def test_raise_error_invalid_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        with pytest.raises(InvalidExposedType):
            CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "foo"})

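    # When the underlying file is modified outside of taipy, last_edit_date should fall back
    # to the file's filesystem mtime instead of the last tracked edit.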
    def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory):
        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.csv"))
        pd.DataFrame([]).to_csv(temp_file_path)
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)

        dn.write(pd.DataFrame([1, 2, 3]))
        previous_edit_date = dn.last_edit_date

        sleep(0.1)

        pd.DataFrame([4, 5, 6]).to_csv(temp_file_path)
        new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))

        assert previous_edit_date < dn.last_edit_date
        assert new_edit_date == dn.last_edit_date

        sleep(0.1)

        dn.write(pd.DataFrame([7, 8, 9]))
        assert new_edit_date < dn.last_edit_date

        os.unlink(temp_file_path)

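    # A data node created over a file in a legacy ".data" folder should have its path
    # migrated out of that folder while still pointing at an existing file.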
    def test_migrate_to_new_path(self, tmp_path):
        _base_path = os.path.join(tmp_path, ".data")
        path = os.path.join(_base_path, "test.csv")
        # create a file at the old path
        os.mkdir(_base_path)
        with open(path, "w"):
            pass

        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})

        assert ".data" not in dn.path
        assert os.path.exists(dn.path)

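    # is_downloadable() returns a Reasons object: truthy when the path exists and is a
    # regular file, otherwise carrying a NoFileToDownload or NotAFile reason.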
    def test_is_downloadable(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        reasons = dn.is_downloadable()
        assert reasons
        assert reasons.reasons == ""

    def test_is_not_downloadable_no_file(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/wrong_example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        reasons = dn.is_downloadable()
        assert not reasons
        assert len(reasons._reasons) == 1
        assert str(NoFileToDownload(_normalize_path(path), dn.id)) in reasons.reasons

    def test_is_not_downloadable_not_a_file(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        reasons = dn.is_downloadable()
        assert not reasons
        assert len(reasons._reasons) == 1
        assert str(NotAFile(_normalize_path(path), dn.id)) in reasons.reasons

    def test_get_downloadable_path(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        assert re.split(r"[\\/]", dn._get_downloadable_path()) == re.split(r"[\\/]", path)

    def test_get_downloadable_path_with_not_existing_file(self):
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "NOT_EXISTING.csv", "exposed_type": "pandas"})
        assert dn._get_downloadable_path() == ""

    def test_is_uploadable(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        assert dn.is_uploadable()

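    # A successful _upload() replaces the node's content with the uploaded file's content
    # and bumps last_edit_date, but never changes the node's own path.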
    def test_upload(self, csv_file, tmpdir_factory):
        old_csv_path = tmpdir_factory.mktemp("data").join("df.csv").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        upload_content = pd.read_csv(csv_file)

        with freezegun.freeze_time(old_last_edit_date + timedelta(seconds=1)):
            dn._upload(csv_file)

        assert_frame_equal(dn.read(), upload_content)  # The content of the dn should change to the uploaded content
        assert dn.last_edit_date > old_last_edit_date
        assert dn.path == _normalize_path(old_csv_path)  # The path of the dn should not change

    def test_upload_fails_if_data_node_locked(self, csv_file, tmpdir_factory):
        old_csv_path = tmpdir_factory.mktemp("data").join("df.csv").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        upload_content = pd.read_csv(csv_file)
        dn.lock_edit("editor_id_1")

        # Another editor cannot upload while the data node is locked...
        reasons = dn._upload(csv_file, editor_id="editor_id_2")
        assert not reasons

        # ...but the editor holding the lock can.
        assert dn._upload(csv_file, editor_id="editor_id_1")

        assert_frame_equal(dn.read(), upload_content)  # The content of the dn should change to the uploaded content

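    # An `upload_checker` is a callable taking the uploaded file's path and the data read
    # from it, and returning True to accept the upload; an exception raised inside the
    # checker is caught and logged, and the upload is rejected.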
    def test_upload_with_upload_check_with_exception(self, csv_file, tmpdir_factory, caplog):
        old_csv_path = tmpdir_factory.mktemp("data").join("df.csv").strpath
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})

        def check_with_exception(upload_path, upload_data):
            raise Exception("An error with check_with_exception")

        reasons = dn._upload(csv_file, upload_checker=check_with_exception)
        assert bool(reasons) is False
        assert (
            f"Error with the upload checker `check_with_exception` "
            f"while checking `df.csv` file for upload to the data "
            f"node `{dn.id}`:" in caplog.text
        )

    def test_upload_with_upload_check_pandas(self, csv_file, tmpdir_factory):
        old_csv_path = tmpdir_factory.mktemp("data").join("df.csv").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        def check_data_column(upload_path, upload_data):
            return upload_path.endswith(".csv") and upload_data.columns.tolist() == ["a", "b", "c"]

        # The upload should fail when the file does not exist
        not_exists_csv_path = tmpdir_factory.mktemp("data").join("not_exists.csv").strpath
        reasons = dn._upload(not_exists_csv_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0]) == "The uploaded file 'not_exists.csv' can not be read,"
            f" therefore is not a valid data file for data node '{dn.id}'"
        )

        not_csv_path = tmpdir_factory.mktemp("data").join("wrong_format_df.not_csv").strpath
        old_data.to_csv(not_csv_path, index=False)
        # The upload should fail when the file is not a csv
        reasons = dn._upload(not_csv_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.not_csv' has invalid data for data node '{dn.id}'"
        )

        wrong_format_csv_path = tmpdir_factory.mktemp("data").join("wrong_format_df.csv").strpath
        pd.DataFrame([{"a": 1, "b": 2, "d": 3}, {"a": 4, "b": 5, "d": 6}]).to_csv(wrong_format_csv_path, index=False)
        # The upload should fail when check_data_column() returns False
        reasons = dn._upload(wrong_format_csv_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.csv' has invalid data for data node '{dn.id}'"
        )

        assert_frame_equal(dn.read(), old_data)  # The content of the dn should not change when upload fails
        assert dn.last_edit_date == old_last_edit_date  # The last edit date should not change when upload fails
        assert dn.path == _normalize_path(old_csv_path)  # The path of the dn should not change

        # The upload should succeed when check_data_column() returns True
        assert dn._upload(csv_file, upload_checker=check_data_column)

    def test_upload_with_upload_check_numpy(self, tmpdir_factory):
        old_csv_path = tmpdir_factory.mktemp("data").join("df.csv").strpath
        old_data = np.array([[1, 2, 3], [4, 5, 6]])

        new_csv_path = tmpdir_factory.mktemp("data").join("new_upload_data.csv").strpath
        new_data = np.array([[1, 2, 3], [4, 5, 6]])
        pd.DataFrame(new_data).to_csv(new_csv_path, index=False)

        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": old_csv_path, "exposed_type": "numpy"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        def check_data_is_positive(upload_path, upload_data):
            return upload_path.endswith(".csv") and np.all(upload_data > 0)

        # The upload should fail when the file does not exist
        not_exists_csv_path = tmpdir_factory.mktemp("data").join("not_exists.csv").strpath
        reasons = dn._upload(not_exists_csv_path, upload_checker=check_data_is_positive)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0]) == "The uploaded file 'not_exists.csv' can not be read"
            f", therefore is not a valid data file for data node '{dn.id}'"
        )

        not_csv_path = tmpdir_factory.mktemp("data").join("wrong_format_df.not_csv").strpath
        pd.DataFrame(old_data).to_csv(not_csv_path, index=False)
        # The upload should fail when the file is not a csv
        reasons = dn._upload(not_csv_path, upload_checker=check_data_is_positive)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.not_csv' has invalid data for data node '{dn.id}'"
        )

        wrong_format_csv_path = tmpdir_factory.mktemp("data").join("wrong_format_df.csv").strpath
        pd.DataFrame(np.array([[-1, 2, 3], [-4, -5, -6]])).to_csv(wrong_format_csv_path, index=False)
        # The upload should fail when check_data_is_positive() returns False
        reasons = dn._upload(wrong_format_csv_path, upload_checker=check_data_is_positive)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.csv' has invalid data for data node '{dn.id}'"
        )

        assert np.array_equal(dn.read(), old_data)  # The content of the dn should not change when upload fails
        assert dn.last_edit_date == old_last_edit_date  # The last edit date should not change when upload fails
        assert dn.path == _normalize_path(old_csv_path)  # The path of the dn should not change

        # The upload should succeed when check_data_is_positive() returns True
        assert dn._upload(new_csv_path, upload_checker=check_data_is_positive)