# test_csv_data_node.py

# Copyright 2023 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import pathlib
from datetime import datetime
from time import sleep

import modin.pandas as modin_pd
import numpy as np
import pandas as pd
import pytest
from modin.pandas.test.utils import df_equals
from pandas.testing import assert_frame_equal

from src.taipy.core.data._data_manager import _DataManager
from src.taipy.core.data.csv import CSVDataNode
from src.taipy.core.data.data_node_id import DataNodeId
from src.taipy.core.data.operator import JoinOperator, Operator
from src.taipy.core.exceptions.exceptions import InvalidExposedType, NoData
from taipy.config.common.scope import Scope
from taipy.config.config import Config
from taipy.config.exceptions.exceptions import InvalidConfigurationId


@pytest.fixture(scope="function", autouse=True)
def cleanup():
    yield
    path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.csv")
    if os.path.isfile(path):
        os.remove(path)


class MyCustomObject:
    def __init__(self, id, integer, text):
        self.id = id
        self.integer = integer
        self.text = text
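

# Assumed contract for custom exposed_type classes: the data node builds one
# instance per CSV row, passing the column values positionally to the
# constructor, so MyCustomObject(id, integer, text) matches example.csv's three
# columns (see the custom-object assertions in the read tests below).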


class TestCSVDataNode:
    def test_create(self):
        path = "data/node/path"
        dn = CSVDataNode(
            "foo_bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "name": "super name"}
        )
        assert isinstance(dn, CSVDataNode)
        assert dn.storage_type() == "csv"
        assert dn.config_id == "foo_bar"
        assert dn.name == "super name"
        assert dn.scope == Scope.SCENARIO
        assert dn.id is not None
        assert dn.owner_id is None
        assert dn.last_edit_date is None
        assert dn.job_ids == []
        assert not dn.is_ready_for_reading
        assert dn.path == path
        assert dn.has_header is False
        assert dn.exposed_type == "pandas"

        with pytest.raises(InvalidConfigurationId):
            dn = CSVDataNode(
                "foo bar", Scope.SCENARIO, properties={"path": path, "has_header": False, "name": "super name"}
            )

    def test_get_user_properties(self, csv_file):
        dn_1 = CSVDataNode("dn_1", Scope.SCENARIO, properties={"path": "data/node/path"})
        assert dn_1._get_user_properties() == {}

        dn_2 = CSVDataNode(
            "dn_2",
            Scope.SCENARIO,
            properties={
                "exposed_type": "numpy",
                "default_data": "foo",
                "default_path": csv_file,
                "has_header": False,
                "foo": "bar",
            },
        )

        # exposed_type, default_data, default_path, path, has_header, sheet_name are filtered out
        assert dn_2._get_user_properties() == {"foo": "bar"}

    def test_new_csv_data_node_with_existing_file_is_ready_for_reading(self):
        not_ready_dn_cfg = Config.configure_data_node("not_ready_data_node_config_id", "csv", path="NOT_EXISTING.csv")
        not_ready_dn = _DataManager._bulk_get_or_create([not_ready_dn_cfg])[not_ready_dn_cfg]
        assert not not_ready_dn.is_ready_for_reading

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        ready_dn_cfg = Config.configure_data_node("ready_data_node_config_id", "csv", path=path)
        ready_dn = _DataManager._bulk_get_or_create([ready_dn_cfg])[ready_dn_cfg]
        assert ready_dn.is_ready_for_reading
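        # is_ready_for_reading is expected to flip to True as soon as the
        # configured file exists on disk, without any write through the node.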

    @pytest.mark.parametrize(
        ["properties", "exists"],
        [
            ({}, False),
            ({"default_data": ["foo", "bar"]}, True),
        ],
    )
    def test_create_with_default_data(self, properties, exists):
        dn = CSVDataNode("foo", Scope.SCENARIO, DataNodeId("dn_id"), properties=properties)
        assert os.path.exists(dn.path) is exists
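        # A possible follow-up check (a sketch, assuming default_data is flushed
        # to the file when the node is created):
        #   if exists:
        #       assert len(pd.read_csv(dn.path)) > 0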

    def test_read_with_header(self):
        not_existing_csv = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "WRONG.csv", "has_header": True})
        with pytest.raises(NoData):
            assert not_existing_csv.read() is None
            not_existing_csv.read_or_raise()

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")

        # Create CSVDataNode without exposed_type (Default is pandas.DataFrame)
        csv_data_node_as_pandas = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path})
        data_pandas = csv_data_node_as_pandas.read()
        assert isinstance(data_pandas, pd.DataFrame)
        assert len(data_pandas) == 10
        assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path).to_numpy())

        # Create CSVDataNode with modin exposed_type
        csv_data_node_as_modin = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "exposed_type": "modin"})
        data_modin = csv_data_node_as_modin.read()
        assert isinstance(data_modin, modin_pd.DataFrame)
        assert len(data_modin) == 10
        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path).to_numpy())

        # Create CSVDataNode with numpy exposed_type
        csv_data_node_as_numpy = CSVDataNode(
            "bar", Scope.SCENARIO, properties={"path": path, "has_header": True, "exposed_type": "numpy"}
        )
        data_numpy = csv_data_node_as_numpy.read()
        assert isinstance(data_numpy, np.ndarray)
        assert len(data_numpy) == 10
        assert np.array_equal(data_numpy, pd.read_csv(path).to_numpy())

        # Create the same CSVDataNode but with custom exposed_type
        csv_data_node_as_custom_object = CSVDataNode(
            "bar", Scope.SCENARIO, properties={"path": path, "exposed_type": MyCustomObject}
        )
        data_custom = csv_data_node_as_custom_object.read()
        assert isinstance(data_custom, list)
        assert len(data_custom) == 10

        for (index, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom):
            assert isinstance(row_custom, MyCustomObject)
            assert row_pandas["id"] == row_custom.id
            assert str(row_pandas["integer"]) == row_custom.integer
            assert row_pandas["text"] == row_custom.text

    def test_read_without_header(self):
        not_existing_csv = CSVDataNode("foo", Scope.SCENARIO, properties={"path": "WRONG.csv", "has_header": False})
        with pytest.raises(NoData):
            assert not_existing_csv.read() is None
            not_existing_csv.read_or_raise()

        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")

        # Create CSVDataNode without exposed_type (Default is pandas.DataFrame).
        # With has_header=False the header line of example.csv is read as a data
        # row, hence 11 rows here versus 10 in test_read_with_header.
        csv_data_node_as_pandas = CSVDataNode("bar", Scope.SCENARIO, properties={"path": path, "has_header": False})
        data_pandas = csv_data_node_as_pandas.read()
        assert isinstance(data_pandas, pd.DataFrame)
        assert len(data_pandas) == 11
        assert np.array_equal(data_pandas.to_numpy(), pd.read_csv(path, header=None).to_numpy())

        # Create CSVDataNode with modin exposed_type
        csv_data_node_as_modin = CSVDataNode(
            "baz", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "modin"}
        )
        data_modin = csv_data_node_as_modin.read()
        assert isinstance(data_modin, modin_pd.DataFrame)
        assert len(data_modin) == 11
        assert np.array_equal(data_modin.to_numpy(), modin_pd.read_csv(path, header=None).to_numpy())

        # Create CSVDataNode with numpy exposed_type
        csv_data_node_as_numpy = CSVDataNode(
            "qux", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": "numpy"}
        )
        data_numpy = csv_data_node_as_numpy.read()
        assert isinstance(data_numpy, np.ndarray)
        assert len(data_numpy) == 11
        assert np.array_equal(data_numpy, pd.read_csv(path, header=None).to_numpy())

        # Create the same CSVDataNode but with custom exposed_type
        csv_data_node_as_custom_object = CSVDataNode(
            "quux", Scope.SCENARIO, properties={"path": path, "has_header": False, "exposed_type": MyCustomObject}
        )
        data_custom = csv_data_node_as_custom_object.read()
        assert isinstance(data_custom, list)
        assert len(data_custom) == 11

        for (index, row_pandas), row_custom in zip(data_pandas.iterrows(), data_custom):
            assert isinstance(row_custom, MyCustomObject)
            assert row_pandas[0] == row_custom.id
            assert str(row_pandas[1]) == row_custom.integer
            assert row_pandas[2] == row_custom.text

    @pytest.mark.parametrize(
        "content",
        [
            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
            (pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])),
            ([[11, 22, 33], [44, 55, 66]]),
        ],
    )
    def test_append(self, csv_file, default_data_frame, content):
        csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file})
        assert_frame_equal(csv_dn.read(), default_data_frame)

        csv_dn.append(content)
        assert_frame_equal(
            csv_dn.read(),
            pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(drop=True),
        )
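        # Unlike write(), append() is expected to keep the existing rows and add
        # the new content after them, as the pd.concat expectation above shows.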

    @pytest.mark.parametrize(
        "content",
        [
            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}]),
            (pd.DataFrame([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}])),
            ([[11, 22, 33], [44, 55, 66]]),
        ],
    )
    def test_append_modin(self, csv_file, default_data_frame, content):
        csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
        df_equals(csv_dn.read(), modin_pd.DataFrame(default_data_frame))

        csv_dn.append(content)
        df_equals(
            csv_dn.read(),
            modin_pd.concat([default_data_frame, pd.DataFrame(content, columns=["a", "b", "c"])]).reset_index(
                drop=True
            ),
        )

    @pytest.mark.parametrize(
        "content,columns",
        [
            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}], None),
            ([[11, 22, 33], [44, 55, 66]], None),
            ([[11, 22, 33], [44, 55, 66]], ["e", "f", "g"]),
        ],
    )
    def test_write(self, csv_file, default_data_frame, content, columns):
        csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file})
        assert np.array_equal(csv_dn.read().values, default_data_frame.values)

        if not columns:
            csv_dn.write(content)
            df = pd.DataFrame(content)
        else:
            csv_dn.write_with_column_names(content, columns)
            df = pd.DataFrame(content, columns=columns)
        assert np.array_equal(csv_dn.read().values, df.values)

        # Writing None empties the data node
        csv_dn.write(None)
        assert len(csv_dn.read()) == 0

    def test_write_with_different_encoding(self, csv_file):
        data = pd.DataFrame([{"≥a": 1, "b": 2}])

        utf8_dn = CSVDataNode("utf8_dn", Scope.SCENARIO, properties={"default_path": csv_file})
        utf16_dn = CSVDataNode("utf16_dn", Scope.SCENARIO, properties={"default_path": csv_file, "encoding": "utf-16"})

        # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding
        utf8_dn.write(data)
        assert np.array_equal(utf8_dn.read(), data)
        with pytest.raises(UnicodeError):
            utf16_dn.read()

        # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding
        utf16_dn.write(data)
        assert np.array_equal(utf16_dn.read(), data)
        with pytest.raises(UnicodeError):
            utf8_dn.read()
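        # The same constraint applies outside the data node: reading the utf-16
        # file directly with pandas needs the matching codec, e.g. (sketch):
        #   pd.read_csv(csv_file, encoding="utf-16")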

    @pytest.mark.parametrize(
        "content,columns",
        [
            ([{"a": 11, "b": 22, "c": 33}, {"a": 44, "b": 55, "c": 66}], None),
            ([[11, 22, 33], [44, 55, 66]], None),
            ([[11, 22, 33], [44, 55, 66]], ["e", "f", "g"]),
        ],
    )
    def test_write_modin(self, csv_file, default_data_frame, content, columns):
        default_data_frame = modin_pd.DataFrame(default_data_frame)
        csv_dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
        assert np.array_equal(csv_dn.read().values, default_data_frame.values)

        if not columns:
            csv_dn.write(content)
            df = pd.DataFrame(content)
        else:
            csv_dn.write_with_column_names(content, columns)
            df = pd.DataFrame(content, columns=columns)
        assert np.array_equal(csv_dn.read().values, df.values)

        csv_dn.write(None)
        assert len(csv_dn.read()) == 0

    def test_write_modin_with_different_encoding(self, csv_file):
        data = pd.DataFrame([{"≥a": 1, "b": 2}])

        utf8_dn = CSVDataNode("utf8_dn", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
        utf16_dn = CSVDataNode(
            "utf16_dn", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin", "encoding": "utf-16"}
        )

        # If a file is written with utf-8 encoding, it can only be read with utf-8, not utf-16 encoding
        utf8_dn.write(data)
        assert np.array_equal(utf8_dn.read(), data)
        with pytest.raises(UnicodeError):
            utf16_dn.read()

        # If a file is written with utf-16 encoding, it can only be read with utf-16, not utf-8 encoding
        utf16_dn.write(data)
        assert np.array_equal(utf16_dn.read(), data)
        with pytest.raises(UnicodeError):
            utf8_dn.read()

    def test_set_path(self):
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": "foo.csv"})
        assert dn.path == "foo.csv"
        dn.path = "bar.csv"
        assert dn.path == "bar.csv"

    def test_read_write_after_modify_path(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        new_path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/temp.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"default_path": path})
        read_data = dn.read()
        assert read_data is not None
        dn.path = new_path
        with pytest.raises(FileNotFoundError):
            dn.read()
        dn.write(read_data)
        assert dn.read().equals(read_data)
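        # Reassigning .path only repoints the node; it neither moves nor copies
        # the underlying file, hence the FileNotFoundError until data is written.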

    def test_pandas_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "pandas"})
        assert isinstance(dn.read(), pd.DataFrame)

    def test_filter_pandas_exposed_type(self, csv_file):
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "pandas"})
        dn.write(
            [
                {"foo": 1, "bar": 1},
                {"foo": 1, "bar": 2},
                {"foo": 1},
                {"foo": 2, "bar": 2},
                {"bar": 2},
            ]
        )

        # Test datanode indexing and slicing
        assert dn["foo"].equals(pd.Series([1, 1, 1, 2, None]))
        assert dn["bar"].equals(pd.Series([1, 2, None, 2, 2]))
        assert dn[:2].equals(pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))

        # Test filter data
        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
        filtered_by_indexing = dn[dn["foo"] == 1]
        expected_data = pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}, {"foo": 1.0}])
        assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
        assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)

        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
        filtered_by_indexing = dn[dn["foo"] != 1]
        expected_data = pd.DataFrame([{"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
        assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
        assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)

        filtered_by_filter_method = dn.filter(("bar", 2, Operator.EQUAL))
        filtered_by_indexing = dn[dn["bar"] == 2]
        expected_data = pd.DataFrame([{"foo": 1.0, "bar": 2.0}, {"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
        assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
        assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)

        filtered_by_filter_method = dn.filter(
            [("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR
        )
        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
        expected_data = pd.DataFrame(
            [
                {"foo": 1.0, "bar": 1.0},
                {"foo": 1.0, "bar": 2.0},
                {"foo": 2.0, "bar": 2.0},
                {"bar": 2.0},
            ]
        )
        assert_frame_equal(filtered_by_filter_method.reset_index(drop=True), expected_data)
        assert_frame_equal(filtered_by_indexing.reset_index(drop=True), expected_data)
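        # JoinOperator.AND is expected to intersect the conditions instead, e.g.
        # (sketch):
        #   dn.filter([("foo", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.AND)
        # would keep only the row {"foo": 1.0, "bar": 2.0}.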

    def test_filter_modin_exposed_type(self, csv_file):
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "modin"})
        dn.write(
            [
                {"foo": 1, "bar": 1},
                {"foo": 1, "bar": 2},
                {"foo": 1},
                {"foo": 2, "bar": 2},
                {"bar": 2},
            ]
        )

        # Test datanode indexing and slicing
        assert dn["foo"].equals(modin_pd.Series([1, 1, 1, 2, None]))
        assert dn["bar"].equals(modin_pd.Series([1, 2, None, 2, 2]))
        assert dn[:2].equals(modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}]))

        # Test filter data
        filtered_by_filter_method = dn.filter(("foo", 1, Operator.EQUAL))
        filtered_by_indexing = dn[dn["foo"] == 1]
        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 1.0}, {"foo": 1.0, "bar": 2.0}, {"foo": 1.0}])
        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)

        filtered_by_filter_method = dn.filter(("foo", 1, Operator.NOT_EQUAL))
        filtered_by_indexing = dn[dn["foo"] != 1]
        expected_data = modin_pd.DataFrame([{"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)

        filtered_by_filter_method = dn.filter(("bar", 2, Operator.EQUAL))
        filtered_by_indexing = dn[dn["bar"] == 2]
        expected_data = modin_pd.DataFrame([{"foo": 1.0, "bar": 2.0}, {"foo": 2.0, "bar": 2.0}, {"bar": 2.0}])
        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)

        filtered_by_filter_method = dn.filter(
            [("bar", 1, Operator.EQUAL), ("bar", 2, Operator.EQUAL)], JoinOperator.OR
        )
        filtered_by_indexing = dn[(dn["bar"] == 1) | (dn["bar"] == 2)]
        expected_data = modin_pd.DataFrame(
            [
                {"foo": 1.0, "bar": 1.0},
                {"foo": 1.0, "bar": 2.0},
                {"foo": 2.0, "bar": 2.0},
                {"bar": 2.0},
            ]
        )
        df_equals(filtered_by_filter_method.reset_index(drop=True), expected_data)
        df_equals(filtered_by_indexing.reset_index(drop=True), expected_data)

    def test_filter_numpy_exposed_type(self, csv_file):
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": csv_file, "exposed_type": "numpy"})
        dn.write(
            [
                [1, 1],
                [1, 2],
                [1, 3],
                [2, 1],
                [2, 2],
                [2, 3],
            ]
        )

        # Test datanode indexing and slicing
        assert np.array_equal(dn[0], np.array([1, 1]))
        assert np.array_equal(dn[1], np.array([1, 2]))
        assert np.array_equal(dn[:3], np.array([[1, 1], [1, 2], [1, 3]]))
        assert np.array_equal(dn[:, 0], np.array([1, 1, 1, 2, 2, 2]))
        assert np.array_equal(dn[1:4, :1], np.array([[1], [1], [2]]))

        # Test filter data
        assert np.array_equal(dn.filter((0, 1, Operator.EQUAL)), np.array([[1, 1], [1, 2], [1, 3]]))
        assert np.array_equal(dn[dn[:, 0] == 1], np.array([[1, 1], [1, 2], [1, 3]]))
        assert np.array_equal(dn.filter((0, 1, Operator.NOT_EQUAL)), np.array([[2, 1], [2, 2], [2, 3]]))
        assert np.array_equal(dn[dn[:, 0] != 1], np.array([[2, 1], [2, 2], [2, 3]]))
        assert np.array_equal(dn.filter((1, 2, Operator.EQUAL)), np.array([[1, 2], [2, 2]]))
        assert np.array_equal(dn[dn[:, 1] == 2], np.array([[1, 2], [2, 2]]))
        assert np.array_equal(
            dn.filter([(1, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.OR),
            np.array([[1, 1], [1, 2], [2, 1], [2, 2]]),
        )
        assert np.array_equal(dn[(dn[:, 1] == 1) | (dn[:, 1] == 2)], np.array([[1, 1], [1, 2], [2, 1], [2, 2]]))
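        # With JoinOperator.AND the row masks are expected to be intersected,
        # e.g. (sketch):
        #   dn.filter([(0, 1, Operator.EQUAL), (1, 2, Operator.EQUAL)], JoinOperator.AND)
        # would match only the row [1, 2].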

    def test_raise_error_invalid_exposed_type(self):
        path = os.path.join(pathlib.Path(__file__).parent.resolve(), "data_sample/example.csv")
        with pytest.raises(InvalidExposedType):
            CSVDataNode("foo", Scope.SCENARIO, properties={"path": path, "exposed_type": "foo"})

    def test_get_system_modified_date_instead_of_last_edit_date(self, tmpdir_factory):
        temp_file_path = str(tmpdir_factory.mktemp("data").join("temp.csv"))
        pd.DataFrame([]).to_csv(temp_file_path)
        dn = CSVDataNode("foo", Scope.SCENARIO, properties={"path": temp_file_path, "exposed_type": "pandas"})

        dn.write(pd.DataFrame([1, 2, 3]))
        previous_edit_date = dn.last_edit_date

        # The sleep calls guarantee distinguishable modification times on
        # filesystems with coarse timestamp resolution.
        sleep(0.1)

        # An edit made directly on the file, outside the data node, should be
        # reflected: last_edit_date falls back to the file's mtime.
        pd.DataFrame([4, 5, 6]).to_csv(temp_file_path)
        new_edit_date = datetime.fromtimestamp(os.path.getmtime(temp_file_path))
        assert previous_edit_date < dn.last_edit_date
        assert new_edit_date == dn.last_edit_date

        sleep(0.1)

        dn.write(pd.DataFrame([7, 8, 9]))
        assert new_edit_date < dn.last_edit_date

        os.unlink(temp_file_path)