# Copyright 2021-2024 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import pathlib
import uuid
from datetime import datetime, timedelta
from importlib import util
from time import sleep

import freezegun
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal

from taipy.config.common.scope import Scope
from taipy.config.config import Config
from taipy.config.exceptions.exceptions import InvalidConfigurationId
from taipy.core.data._data_manager import _DataManager
from taipy.core.data.data_node_id import DataNodeId
from taipy.core.data.parquet import ParquetDataNode
from taipy.core.exceptions.exceptions import (
    InvalidExposedType,
    UnknownCompressionAlgorithm,
    UnknownParquetEngine,
)


@pytest.fixture(scope="function", autouse=True)
def cleanup():
    yield
    path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.parquet")
    if os.path.isfile(path):
        os.remove(path)


class MyCustomObject:
    def __init__(self, id, integer, text):
        self.id = id
        self.integer = integer
        self.text = text


class MyOtherCustomObject:
    def __init__(self, id, sentence):
        self.id = id
        self.sentence = sentence


def create_custom_class(**kwargs):
    return MyOtherCustomObject(id=kwargs["id"], sentence=kwargs["text"])
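

# NOTE: the `parquet_file_path` fixture used by several tests below is assumed to be
# provided by the test suite's conftest.py and to point at a small sample .parquet file.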


class TestParquetDataNode:
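    # pyarrow is always exercised; fastparquet is only tested when the package is installed.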
    __engine = ["pyarrow"]
    if util.find_spec("fastparquet"):
        __engine.append("fastparquet")

    def test_create(self):
        path = "data/node/path"
        compression = "snappy"
        dn = ParquetDataNode(
            "foo_bar", Scope.SCENARIO, properties={"path": path, "compression": compression, "name": "super name"}
        )
        assert isinstance(dn, ParquetDataNode)
        assert dn.storage_type() == "parquet"
        assert dn.config_id == "foo_bar"
        assert dn.name == "super name"
        assert dn.scope == Scope.SCENARIO
        assert dn.id is not None
        assert dn.owner_id is None
        assert dn.last_edit_date is None
        assert dn.job_ids == []
        assert not dn.is_ready_for_reading
        assert dn.path == path
        assert dn.exposed_type == "pandas"
        assert dn.compression == "snappy"
        assert dn.engine == "pyarrow"

        with pytest.raises(InvalidConfigurationId):
            dn = ParquetDataNode("foo bar", Scope.SCENARIO, properties={"path": path, "name": "super name"})

    def test_get_user_properties(self, parquet_file_path):
        dn_1 = ParquetDataNode("dn_1", Scope.SCENARIO, properties={"path": parquet_file_path})
        assert dn_1._get_user_properties() == {}

        dn_2 = ParquetDataNode(
            "dn_2",
            Scope.SCENARIO,
            properties={
                "exposed_type": "numpy",
                "default_data": "foo",
                "default_path": parquet_file_path,
                "engine": "pyarrow",
                "compression": "snappy",
                "read_kwargs": {"columns": ["a", "b"]},
                "write_kwargs": {"index": False},
                "foo": "bar",
            },
        )

        # exposed_type, default_data, default_path, path, engine, compression, read_kwargs, write_kwargs
        # are filtered out
        assert dn_2._get_user_properties() == {"foo": "bar"}

    def test_new_parquet_data_node_with_existing_file_is_ready_for_reading(self, parquet_file_path):
        not_ready_dn_cfg = Config.configure_data_node(
            "not_ready_data_node_config_id", "parquet", path="NOT_EXISTING.parquet"
        )
        not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg]
        assert not not_ready_dn.is_ready_for_reading

        ready_dn_cfg = Config.configure_data_node("ready_data_node_config_id", "parquet", path=parquet_file_path)
        ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg]
        assert ready_dn.is_ready_for_reading

    @pytest.mark.parametrize(
        ["properties", "exists"],
        [
            ({}, False),
            ({"default_data": {"a": ["foo", "bar"]}}, True),
        ],
    )
    def test_create_with_default_data(self, properties, exists):
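        # Without an explicit path, the data node should default to a file named after its id
        # under the storage folder's "parquets" directory; the file exists on disk only when
        # default_data was provided.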
        dn = ParquetDataNode("foo", Scope.SCENARIO, DataNodeId(f"dn_id_{uuid.uuid4()}"), properties=properties)
        assert dn.path == os.path.join(Config.core.storage_folder.strip("/"), "parquets", dn.id + ".parquet")
        assert os.path.exists(dn.path) is exists

    @pytest.mark.parametrize("engine", __engine)
    def test_modin_deprecated_in_favor_of_pandas(self, engine, parquet_file_path):
        # Create ParquetDataNode with modin exposed_type
        props = {"path": parquet_file_path, "exposed_type": "modin", "engine": engine}
        parquet_data_node_as_modin = ParquetDataNode("bar", Scope.SCENARIO, properties=props)
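
        # The deprecated "modin" exposed_type should be silently replaced by "pandas"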
        assert parquet_data_node_as_modin.properties["exposed_type"] == "pandas"
        data_modin = parquet_data_node_as_modin.read()
        assert isinstance(data_modin, pd.DataFrame)

    def test_set_path(self):
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": "foo.parquet"})
        assert dn.path == "foo.parquet"
        dn.path = "bar.parquet"
        assert dn.path == "bar.parquet"

    def test_raise_error_unknown_parquet_engine(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        with pytest.raises(UnknownParquetEngine):
            ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "engine": "foo"})

    def test_raise_error_unknown_compression_algorithm(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        with pytest.raises(UnknownCompressionAlgorithm):
            ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "compression": "foo"})

    def test_raise_error_invalid_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        with pytest.raises(InvalidExposedType):
            ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "foo"})

    def test_get_system_file_modified_date_instead_of_last_edit_date(self, tmpdir_factory):
        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.parquet"))
        pd.DataFrame([]).to_parquet(temp_file_path)
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})

        dn.write(pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}))
        previous_edit_date = dn.last_edit_date

        sleep(0.1)
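
        # Write the file directly (outside the data node); last_edit_date should
        # follow the file's modification time on disk.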
        pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]}).to_parquet(temp_file_path)
        new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert previous_edit_date < dn.last_edit_date
        assert new_edit_date == dn.last_edit_date

        sleep(0.1)

        dn.write(pd.DataFrame(data={"col1": [9, 10], "col2": [10, 12]}))
        assert new_edit_date < dn.last_edit_date
        os.unlink(temp_file_path)

    def test_get_system_folder_modified_date_instead_of_last_edit_date(self, tmpdir_factory):
        temp_folder_path = tmpdir_factory.mktemp("data").strpath
        temp_file_path = os.path.join(temp_folder_path, "temp.parquet")
        pd.DataFrame([]).to_parquet(temp_file_path)

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": temp_folder_path})
        initial_edit_date = dn.last_edit_date

        # Sleep so that the file can be created successfully on Ubuntu
        sleep(0.1)
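
        # Writing a file inside the folder should update the data node's
        # last_edit_date to that file's modification time.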
        pd.DataFrame(data={"col1": [1, 2], "col2": [3, 4]}).to_parquet(temp_file_path)
        first_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert dn.last_edit_date > initial_edit_date
        assert dn.last_edit_date == first_edit_date

        sleep(0.1)

        pd.DataFrame(data={"col1": [5, 6], "col2": [7, 8]}).to_parquet(temp_file_path)
        second_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert dn.last_edit_date > first_edit_date
        assert dn.last_edit_date == second_edit_date

        os.unlink(temp_file_path)

    def test_migrate_to_new_path(self, tmp_path):
        _base_path = os.path.join(tmp_path, ".data")
        path = os.path.join(_base_path, "test.parquet")

        # Create a file at the old ".data" path
        os.mkdir(_base_path)
        with open(path, "w"):
            pass
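
        # Creating the data node should migrate the file out of the legacy ".data"
        # folder into the current storage location.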
        dn = ParquetDataNode("foo_bar", Scope.SCENARIO, properties={"path": path, "name": "super name"})

        assert ".data" not in dn.path
        assert os.path.exists(dn.path)

    def test_get_downloadable_path(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.parquet")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        assert dn._get_downloadable_path() == path

    def test_get_downloadable_path_with_not_existing_file(self):
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": "NOT_EXISTING.parquet"})
        assert dn._get_downloadable_path() == ""

    def test_get_downloadable_path_as_directory_should_return_nothing(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/parquet_example")
        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": path})
        assert dn._get_downloadable_path() == ""

    def test_upload(self, parquet_file_path, tmpdir_factory):
        old_parquet_path = tmpdir_factory.mktemp("data").join("df.parquet").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "pandas"})
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        upload_content = pd.read_parquet(parquet_file_path)
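
        # Freeze time one second ahead of the last edit so the upload records a strictly newer last_edit_date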
        with freezegun.freeze_time(old_last_edit_date + timedelta(seconds=1)):
            dn._upload(parquet_file_path)

        assert_frame_equal(dn.read(), upload_content)  # The content of the dn should change to the uploaded content
        assert dn.last_edit_date > old_last_edit_date
        assert dn.path == old_parquet_path  # The path of the dn should not change

    def test_upload_with_upload_check_pandas(self, parquet_file_path, tmpdir_factory):
        old_parquet_path = tmpdir_factory.mktemp("data").join("df.parquet").strpath
        old_data = pd.DataFrame([{"a": 0, "b": 1, "c": 2}, {"a": 3, "b": 4, "c": 5}])

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "pandas"})
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        def check_data_column(upload_path, upload_data):
            return upload_path.endswith(".parquet") and upload_data.columns.tolist() == ["a", "b", "c"]
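
        # The upload should fail when the target file does not exist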
        not_exists_parquet_path = tmpdir_factory.mktemp("data").join("not_exists.parquet").strpath
        reasons = dn._upload(not_exists_parquet_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0]) == "The uploaded file not_exists.parquet can not be read,"
            f' therefore is not a valid data file for data node "{dn.id}"'
        )

        not_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.not_parquet").strpath
        old_data.to_parquet(not_parquet_path, index=False)
        # The upload should fail when the file is not a parquet file
        reasons = dn._upload(not_parquet_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f'The uploaded file wrong_format_df.not_parquet has invalid data for data node "{dn.id}"'
        )

        wrong_format_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.parquet").strpath
        pd.DataFrame([{"a": 1, "b": 2, "d": 3}, {"a": 4, "b": 5, "d": 6}]).to_parquet(
            wrong_format_parquet_path, index=False
        )
        # The upload should fail when check_data_column() returns False
        reasons = dn._upload(wrong_format_parquet_path, upload_checker=check_data_column)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f'The uploaded file wrong_format_df.parquet has invalid data for data node "{dn.id}"'
        )

        assert_frame_equal(dn.read(), old_data)  # The content of the dn should not change when the upload fails
        assert dn.last_edit_date == old_last_edit_date  # The last edit date should not change when the upload fails
        assert dn.path == old_parquet_path  # The path of the dn should not change

        # The upload should succeed when check_data_column() returns True
        assert dn._upload(parquet_file_path, upload_checker=check_data_column)

    def test_upload_with_upload_check_numpy(self, tmpdir_factory):
        old_parquet_path = tmpdir_factory.mktemp("data").join("df.parquet").strpath
        old_data = np.array([[1, 2, 3], [4, 5, 6]])

        new_parquet_path = tmpdir_factory.mktemp("data").join("new_upload_data.parquet").strpath
        new_data = np.array([[1, 2, 3], [4, 5, 6]])
        pd.DataFrame(new_data, columns=["a", "b", "c"]).to_parquet(new_parquet_path, index=False)

        dn = ParquetDataNode("foo", Scope.SCENARIO, properties={"path": old_parquet_path, "exposed_type": "numpy"})
        dn.write(old_data)
        old_last_edit_date = dn.last_edit_date

        def check_data_is_positive(upload_path, upload_data):
            return upload_path.endswith(".parquet") and np.all(upload_data > 0)
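
        # The upload should fail when the target file does not exist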
        not_exists_parquet_path = tmpdir_factory.mktemp("data").join("not_exists.parquet").strpath
        reasons = dn._upload(not_exists_parquet_path, upload_checker=check_data_is_positive)
        assert bool(reasons) is False
        assert (
            str(list(reasons._reasons[dn.id])[0]) == "The uploaded file not_exists.parquet can not be read,"
            f' therefore is not a valid data file for data node "{dn.id}"'
        )

        not_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.not_parquet").strpath
        pd.DataFrame(old_data, columns=["a", "b", "c"]).to_parquet(not_parquet_path, index=False)
        # The upload should fail when the file is not a parquet file
        reasons = dn._upload(not_parquet_path, upload_checker=check_data_is_positive)
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f'The uploaded file wrong_format_df.not_parquet has invalid data for data node "{dn.id}"'
        )

        wrong_format_parquet_path = tmpdir_factory.mktemp("data").join("wrong_format_df.parquet").strpath
        pd.DataFrame(np.array([[-1, 2, 3], [-4, -5, -6]]), columns=["a", "b", "c"]).to_parquet(
            wrong_format_parquet_path, index=False
        )
        # The upload should fail when check_data_is_positive() returns False
        reasons = dn._upload(wrong_format_parquet_path, upload_checker=check_data_is_positive)
        assert (
            str(list(reasons._reasons[dn.id])[0])
            == f'The uploaded file wrong_format_df.parquet has invalid data for data node "{dn.id}"'
        )

        assert np.array_equal(dn.read(), old_data)  # The content of the dn should not change when the upload fails
        assert dn.last_edit_date == old_last_edit_date  # The last edit date should not change when the upload fails
        assert dn.path == old_parquet_path  # The path of the dn should not change

        # The upload should succeed when check_data_is_positive() returns True
        assert dn._upload(new_parquet_path, upload_checker=check_data_is_positive)