# Copyright 2021-2025 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import pathlib
import re
import uuid
from datetime import datetime, timedelta
from importlib import util
from time import sleep

import freezegun
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal

from taipy import Scope
from taipy.common.config import Config
from taipy.common.config.exceptions.exceptions import InvalidConfigurationId
from taipy.core.common._utils import _normalize_path
from taipy.core.data._data_manager import _DataManager
from taipy.core.data._data_manager_factory import _DataManagerFactory
from taipy.core.data.data_node_id import DataNodeId
from taipy.core.data.parquet import ParquetDataNode
from taipy.core.exceptions.exceptions import (
    InvalidExposedType,
    UnknownCompressionAlgorithm,
    UnknownParquetEngine,
)
from taipy.core.reason import NoFileToDownload, NotAFile
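

# Autouse fixture: remove the temporary Parquet file written under data_sample/ after each test.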
@pytest.fixture(scope="function", autouse=True)
def cleanup():
    yield
    path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.parquet")
    if os.path.isfile(path):
        os.remove(path)
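

# Helper classes available as custom exposed types (MyCustomObject is used in test_create).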
class MyCustomObject:
    def __init__(self, id, integer, text):
        self.id = id
        self.integer = integer
        self.text = text


class MyOtherCustomObject:
    def __init__(self, id, sentence):
        self.id = id
        self.sentence = sentence


def create_custom_class(**kwargs):
    return MyOtherCustomObject(id=kwargs["id"], sentence=kwargs["text"])


class TestParquetDataNode:
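    # Always test the pyarrow engine; also test fastparquet when it is installed.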
    __engine = ["pyarrow"]
    if util.find_spec("fastparquet"):
        __engine.append("fastparquet")

    def test_create(self):
        path = "data/node/path"
        compression = "snappy"
        parquet_dn_config = Config.configure_parquet_data_node(
            id="foo_bar", default_path=path, compression=compression, name="super name"
        )
        dn = _DataManagerFactory._build_manager()._create(parquet_dn_config, None, None)
        assert isinstance(dn, ParquetDataNode)
        assert dn.storage_type() == "parquet"
        assert dn.config_id == "foo_bar"
        assert dn.name == "super name"
        assert dn.scope == Scope.SCENARIO
        assert dn.id is not None
        assert dn.owner_id is None
        assert dn.last_edit_date is None
        assert dn.job_ids == []
        assert not dn.is_ready_for_reading
        assert dn.path == path
        assert dn.properties["exposed_type"] == "pandas"
        assert dn.properties["compression"] == "snappy"
        assert dn.properties["engine"] == "pyarrow"

        parquet_dn_config_1 = Config.configure_parquet_data_node(
            id="bar", default_path=path, compression=compression, exposed_type=MyCustomObject
        )
        dn_1 = _DataManagerFactory._build_manager()._create(parquet_dn_config_1, None, None)
        assert isinstance(dn_1, ParquetDataNode)
        assert dn_1.properties["exposed_type"] == MyCustomObject

        parquet_dn_config_2 = Config.configure_parquet_data_node(
            id="bar", default_path=path, compression=compression, exposed_type=np.ndarray
        )
        dn_2 = _DataManagerFactory._build_manager()._create(parquet_dn_config_2, None, None)
        assert isinstance(dn_2, ParquetDataNode)
        assert dn_2.properties["exposed_type"] == np.ndarray

        parquet_dn_config_3 = Config.configure_parquet_data_node(
            id="bar", default_path=path, compression=compression, exposed_type=pd.DataFrame
        )
        dn_3 = _DataManagerFactory._build_manager()._create(parquet_dn_config_3, None, None)
        assert isinstance(dn_3, ParquetDataNode)
        assert dn_3.properties["exposed_type"] == pd.DataFrame

        with pytest.raises(InvalidConfigurationId):
            dn = ParquetDataNode("foo bar", Scope.SCENARIO, properties={"path": path, "name": "super name"})

    def test_get_user_properties(self, parquet_file_path):
        dn_1 = ParquetDataNode("dn_1", Scope.SCENARIO, properties={"path": parquet_file_path})
        assert dn_1._get_user_properties() == {}

        dn_2 = ParquetDataNode(
            "dn_2",
            Scope.SCENARIO,
            properties={
                "exposed_type": "numpy",
                "default_data": "foo",
                "default_path": parquet_file_path,
                "engine": "pyarrow",
                "compression": "snappy",
                "read_kwargs": {"columns": ["a", "b"]},
                "write_kwargs": {"index": False},
                "foo": "bar",
            },
        )

        # exposed_type, default_data, default_path, path, engine, compression, read_kwargs, write_kwargs
        # are filtered out
        assert dn_2._get_user_properties() == {"foo": "bar"}

    def test_new_parquet_data_node_with_existing_file_is_ready_for_reading(self, parquet_file_path):
        not_ready_dn_cfg = Config.configure_data_node(
            "not_ready_data_node_config_id", "parquet", path="NOT_EXISTING.parquet"
        )
        not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg]
        assert not not_ready_dn.is_ready_for_reading

        ready_dn_cfg = Config.configure_data_node("ready_data_node_config_id", "parquet", path=parquet_file_path)
        ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg]
        assert ready_dn.is_ready_for_reading

    @pytest.mark.parametrize(
        ["properties", "exists"],
        [
            ({}, False),
            ({"default_data": {"a": ["foo", "bar"]}}, True),
        ],
    )
    def test_create_with_default_data(self, properties, exists):
        dn = ParquetDataNode("foo", Scope.SCENARIO, DataNodeId(f"dn_id_{uuid.uuid4()}"), properties=properties)
        assert dn.path == f"{Config.core.storage_folder}parquets/{dn.id}.parquet"
        assert os.path.exists(dn.path) is exists

    @pytest.mark.parametrize("engine", __engine)
    def test_modin_deprecated_in_favor_of_pandas(self, engine, parquet_file_path):
        # Create ParquetDataNode with modin exposed_type
        props = {"path": parquet_file_path, "exposed_type": "modin", "engine": engine}
        parquet_data_node_as_modin = ParquetDataNode("bar", Scope.SCENARIO, properties=props)
        assert parquet_data_node_as_modin.properties["exposed_type"] == "pandas"
        data_modin = parquet_data_node_as_modin.read()
        assert isinstance(data_modin, pd.DataFrame)

    def test_set_path(self):
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": "foo.parquet"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        assert dn.path == "foo.parquet"
        dn.path = "bar.parquet"
        assert dn.path == "bar.parquet"

    def test_raise_error_unknown_parquet_engine(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        with pytest.raises(UnknownParquetEngine):
            ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "engine": "foo"})

    def test_raise_error_unknown_compression_algorithm(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        with pytest.raises(UnknownCompressionAlgorithm):
            ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "compression": "foo"})

    def test_raise_error_invalid_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        with pytest.raises(InvalidExposedType):
            ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "foo"})
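
    # When the underlying file is modified outside of Taipy, last_edit_date should reflect the file's
    # modification time on disk rather than the last recorded edit.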
    def test_get_system_file_modified_date_instead_of_last_edit_date(self, tmpdir_factory):
        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
        pd.DataFrame([]).to_parquet(temp_file_path)
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)

        dn.write(pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}))
        previous_edit_date = dn.last_edit_date

        sleep(0.1)

        pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]}).to_parquet(temp_file_path)
        new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert previous_edit_date < dn.last_edit_date
        assert new_edit_date == dn.last_edit_date

        sleep(0.1)

        dn.write(pd.DataFrame(data={"col1": [9, 10], "col2": [10, 12]}))
        assert new_edit_date < dn.last_edit_date
        os.unlink(temp_file_path)

    def test_get_system_folder_modified_date_instead_of_last_edit_date(self, tmpdir_factory):
        temp_folder_path = tmpdir_factory.mktemp("data").strpath
        temp_file_path = os.path.join(temp_folder_path, "temp.parquet")
        pd.DataFrame([]).to_parquet(temp_file_path)

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_folder_path})
        _DataManagerFactory._build_manager()._repository._save(dn)
        initial_edit_date = dn.last_edit_date

        # Sleep so that the file can be created successfully on Ubuntu
        sleep(0.1)

        pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}).to_parquet(temp_file_path)
        first_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert dn.last_edit_date > initial_edit_date
        assert dn.last_edit_date == first_edit_date

        sleep(0.1)

        pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]}).to_parquet(temp_file_path)
        second_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert dn.last_edit_date > first_edit_date
        assert dn.last_edit_date == second_edit_date

        os.unlink(temp_file_path)
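
    # A path under the legacy ".data" folder is expected to be migrated to the current storage folder.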
    def test_migrate_to_new_path(self, tmp_path):
        _base_path = os.path.join(tmp_path, ".data")
        path = os.path.join(_base_path, "test.parquet")
        # create a file on old path
        os.mkdir(_base_path)
        with open(path, "w"):
            pass

        dn = ParquetDataNode("foo_bar", Scope.SCENARIO, properties={"path": path, "name": "super name"})

        assert ".data" not in dn.path
        assert os.path.exists(dn.path)

    def test_is_downloadable(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        reasons = dn.is_downloadable()
        assert reasons
        assert reasons.reasons == ""

    def test_is_not_downloadable_no_file(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/wrong_path.parquet")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        reasons = dn.is_downloadable()
        assert not reasons
        assert len(reasons._reasons) == 1
        assert str(NoFileToDownload(_normalize_path(path), dn.id)) in reasons.reasons

    def test_is_not_downloadable_not_a_file(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        reasons = dn.is_downloadable()
        assert not reasons
        assert len(reasons._reasons) == 1
        assert str(NotAFile(_normalize_path(path), dn.id)) in reasons.reasons

    def test_get_downloadable_path(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        assert re.split(r"[\\/]", dn._get_downloadable_path()) == re.split(r"[\\/]", path)

    def test_get_downloadable_path_with_not_existing_file(self):
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": "NOT_EXISTING.parquet"})
        assert dn._get_downloadable_path() == ""

    def test_get_downloadable_path_as_directory_should_return_nothing(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/parquet_example")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path})
        assert dn._get_downloadable_path() == ""
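
    # _upload() should replace the data node content with the uploaded file's content while keeping its path.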
    def test_upload(self, parquet_file_path, tmpdir_factory):
        old_parquet_path = tmpdir_factory.mktemp("data").join("df.parquet").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        upload_content = pd.read_parquet(parquet_file_path)

        with freezegun.freeze_time(old_last_edit_date + timedelta(seconds=1)):
            dn._upload(parquet_file_path)

        assert_frame_equal(dn.read(), upload_content)  # The content of the dn should change to the uploaded content
        assert dn.last_edit_date > old_last_edit_date
        assert dn.path == _normalize_path(old_parquet_path)  # The path of the dn should not change
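
    # _upload() should be rejected when the file cannot be read, is not a Parquet file, or fails the upload_checker.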
    def test_upload_with_upload_check_pandas(self, parquet_file_path, tmpdir_factory):
        old_parquet_path = tmpdir_factory.mktemp("data").join("df.parquet").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "pandas"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        def check_data_column(upload_path, upload_data):
            return upload_path.endswith(".parquet") and upload_data.columns.tolist() == ["a", "b", "c"]

        not_exists_parquet_path = tmpdir_factory.mktemp("data").join("not_exists.parquet").strpath
        reasons = dn._upload(not_exists_parquet_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0]) == "The uploaded file 'not_exists.parquet' can not be read,"
            f" therefore is not a valid data file for data node '{dn.id}'"
        )

        not_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.not_parquet").strpath
        old_data.to_parquet(not_parquet_path, index=False)
        # The upload should fail when the file is not a parquet
        reasons = dn._upload(not_parquet_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.not_parquet' has invalid data for data node '{dn.id}'"
        )

        wrong_format_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.parquet").strpath
        pd.DataFrame([{"a": 1, "b": 2, "d": 3}, {"a": 4, "b": 5, "d": 6}]).to_parquet(
            wrong_format_parquet_path, index=False
        )
        # The upload should fail when check_data_column() returns False
        reasons = dn._upload(wrong_format_parquet_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.parquet' has invalid data for data node '{dn.id}'"
        )

        assert_frame_equal(dn.read(), old_data)  # The content of the dn should not change when upload fails
        assert dn.last_edit_date == old_last_edit_date  # The last edit date should not change when upload fails
        assert dn.path == _normalize_path(old_parquet_path)  # The path of the dn should not change

        # The upload should succeed when check_data_column() returns True
        assert dn._upload(parquet_file_path, upload_checker=check_data_column)
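
    # Same upload checks with a numpy exposed type: the checker requires all uploaded values to be positive.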
    def test_upload_with_upload_check_numpy(self, tmpdir_factory):
        old_parquet_path = tmpdir_factory.mktemp("data").join("df.parquet").strpath
        old_data = np.array([[1, 2, 3], [4, 5, 6]])

        new_parquet_path = tmpdir_factory.mktemp("data").join("new_upload_data.parquet").strpath
        new_data = np.array([[1, 2, 3], [4, 5, 6]])
        pd.DataFrame(new_data, columns=["a", "b", "c"]).to_parquet(new_parquet_path, index=False)

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "numpy"})
        _DataManagerFactory._build_manager()._repository._save(dn)
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        def check_data_is_positive(upload_path, upload_data):
            return upload_path.endswith(".parquet") and np.all(upload_data > 0)

        not_exists_parquet_path = tmpdir_factory.mktemp("data").join("not_exists.parquet").strpath
        reasons = dn._upload(not_exists_parquet_path, upload_checker=check_data_is_positive)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0]) == "The uploaded file 'not_exists.parquet' can not be read,"
            f" therefore is not a valid data file for data node '{dn.id}'"
        )

        not_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.not_parquet").strpath
        pd.DataFrame(old_data, columns=["a", "b", "c"]).to_parquet(not_parquet_path, index=False)
        # The upload should fail when the file is not a parquet
        reasons = dn._upload(not_parquet_path, upload_checker=check_data_is_positive)
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.not_parquet' has invalid data for data node '{dn.id}'"
        )

        wrong_format_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.parquet").strpath
        pd.DataFrame(np.array([[-1, 2, 3], [-4, -5, -6]]), columns=["a", "b", "c"]).to_parquet(
            wrong_format_parquet_path, index=False
        )
        # The upload should fail when check_data_is_positive() returns False
        reasons = dn._upload(wrong_format_parquet_path, upload_checker=check_data_is_positive)
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f"The uploaded file 'wrong_format_df.parquet' has invalid data for data node '{dn.id}'"
        )

        assert np.array_equal(dn.read(), old_data)  # The content of the dn should not change when upload fails
        assert dn.last_edit_date == old_last_edit_date  # The last edit date should not change when upload fails
        assert dn.path == _normalize_path(old_parquet_path)  # The path of the dn should not change

        # The upload should succeed when check_data_is_positive() returns True
        assert dn._upload(new_parquet_path, upload_checker=check_data_is_positive)