url_test.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373
# -*- coding: utf-8 -*-
  2. # @Time : 2022/8/12 11:04
  3. # @Author : Clown
  4. # @File : url_test.py
  5. # @Software : PyCharm
  6. import requests
  7. import random
  8. from datetime import datetime,timedelta
  9. import time
  10. import os
  11. import shutil
  12. beginTime = (datetime.today() + timedelta(days=-1)).strftime('%Y-%m-%d') #2022-06-23
  13. endTime = (datetime.today() + timedelta(days=-1)).strftime('%Y-%m-%d') #2022-06-23
  14. beginTime_int =int((datetime.today() + timedelta(days=-1)).strftime('%Y%m%d')) #20220623
  15. endTime_int = int((datetime.today() + timedelta(days=-1)).strftime('%Y%m%d')) #20220623
  16. # print(beginTime)
  17. # print(beginTime_int)
  18. # print(endTime)
  19. # print(endTime_int)
  20. # url = 'https://app-api.shop.ele.me/stats/invoke/?method=CustomerAnalysisService.customerHierarchyData'
  21. # headers = {
  22. # 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
  23. # 'content-type': 'application/json',
  24. # 'accept': 'application/json, text/plain, */*',
  25. # 'sec-ch-ua': '"Chromium";v="104", " Not A;Brand";v="99", "Google Chrome";v="104"',
  26. # 'sec-ch-ua-mobile': '?0',
  27. # 'sec-ch-ua-platform': "Windows",
  28. # 'sec-fetch-dest': 'empty',
  29. # 'sec-fetch-mode': 'cors',
  30. # 'sec-fetch-site': 'same-site',
  31. # 'referer': 'https://melody-stats-next.faas.ele.me/',
  32. # 'bx-ua':'140#sWTn783NzzFxZQo23zOb4pN8s77p6gpTW8p1LaAcsY3eQqK+StVLHua1dDOK3uLbYsbdkv/qlbzx1kO9F05pzzrM+1Pslp6Q+dxFJu8hIM5fjwVLsYl2qCCB6mBDh63TR4WQhToylULTMtWlSqPZqnCRe/km9L68qpyj69j0vc3566CUEYoDFDLBuYNFgTAM//mMp6PN8CAhw6vQIkPBXI6E/Y0KT616zPzbVXlqlbnzvkzuczCLSFFhYiQX/HaZxHNHnxblQRrrJQFucoEQSQPyYi1jWLPWxmHfniOaM9+r1BFqc0DKSQFDOi1jCpDWxL+DniOFV9+r+5+qcvvZSQFTOiTBcsafxoTdcZM7ULfrvxQXnrK1yQPWK5OqnGYwxzWyrZfoSGQrI2XFdnofvFWaOi2wz3cqxxOxr5uVkRbrhxbArA0U0FPHhxXP2BjBxzjud5+n5mzrrHQ6KFCIsFPiYW1YP4wExK+c4EsCYmFrYOM53wjfJFWsvozM/OP6x2KG+oZUT7MrrYbQ3dndJFPMDozMzmP6xK2X+oZ6lhMrzE1Q3daXJQPAhV2UruKgxoH4+VRDsBTr79T83I/rJQPoJE2UPkwgxx8v+Et6aBbrVQue+A8pNFFtnoFQoNUhxonb3E3bMBOr3Ezl+UmzNbFz4VuE+gcbxLnNmPSWQjzr+XTpHfH5OzPiv8M92TDbx2KgmPyETazrrjMpHA4DOzPQoPM9DBIbxoOumPyiQjzraVQpHW4EOFPZhDQOrTKixoccmDio0jrrOLXsHDmXOFPzpIQOuGIixxPvmDiWUHrrsFusHzHROFWid8QOjtDixHOemDbE0arrSKfNHi4jOQF5iD1aF9aVxHDfmIOVUY+rbBFNHvKtOQFFOI1af6UVxmzDmIOovj+r1x2NHbclOQFiL2Y4llfzzPziVdcQsJFruQl8xrvZrI7ZbQ2zO1UD46AdPqo6oZ7u4hNoglLDs8Ulna9CGAaAIphEea+ZZSHFeK03wsES4fhU1hEVmuLmeJNCEf2bVrzX5UT2ixNjIVdNliOCF+uAwF471NYmiiX7B/AEjtUOxtGa3s68L7tn+IburNM7PktGHfAIACWrsn48WtYM/IwO6rD3dmtA9QxfDwAYuk46scZnvKHrQ9vDA6EniUoyI/ZUoBTaxQ0Q02m6lsw6DZC+ptnqJs5dh046w9d43N3CKQgemkXyMcZ+iwAUU2XLs6PBA+noJdOdzFRBo0U32ahTHSUBMk1tl00gasrIt13ZAgWXm5x6DTLZYvxwdXhSIf2vqg1cI71IeogQsPaVfdnDbMuj5nOVf91lQU1Yvfd5qUgchPLxOdbSJsl1zrvkPVHjVYWRZfga1uomwCgIALgSLKOsOAxyuy9zziuNn/kYHS6jBwerz1kqJWgn4RzCR1ukCGB70hu9FsbcW9izleQ+xk42OWRA5HGUHdtxChDzzKD4yr6kulnar4TV/OVGbQBY9TnNFwQ+mj/4UNKNr3bUAWR6r5ZrrpigmMktejvY0+e1EZVtvlqlngrSn5FQeeJGqnh8wF/mJ3vJapWXbaEBXRyj2rdKyAQZJjGysFVzIaKj5XVjF5MFQJOlazMQebn9CVAHd1dqIUNM7FzzggPzKSXeDryjZ4aO8lFfQFygaqEC4YJxCWDzr5rDQ4vjx3KrkoZzHyNM/X82Vv0IM/G81l3BMCzbPl8flQP4Ffj5hvjEW41vUWUqtv3WK3n5paD5VjYgbiWVknbC3+K0VHIWTns3Ix8o8OGckKDHwDuXIuiBYYVxcT7FEpwlTsVewtZnGd6ud+I3p0geqFscOqUq/914WMt0rRovWLHgm+UlqSgwlB9fRtUB7xMfO3lBcIlW2Vf4O4UrOR+xBpNN1pDfesSts9qKwo+xHKpAmbha04WDeBIcxuS5vCMgbq5PcAu6Bi467MS+XTJEjbeVEDeWcQicOYzweB1xc4q/JN6nERixJ1K
fqQH1FIhM3VZC4iwZiq62D9TBfWiFwHPugDhKfE5/z7sXyY5nxzfARfb/AJFREfkz95t6IJz89o/AfVsDkK/3XdO+cpNpxbSoQxrZLOIJaNm1o577vLC2zzxqx1yJPo3W1lD7aoUkDpbtZS122t/yTfuLNIBtwNINwXs9kIFCDyD7lTFc+lHzMJxAbgMU/qzkRNwmo8KfcVHF+gexk5vYlGEgsFfckjgizbEW+3g1sEiaA38sV9lGhBUwfnsSieQUB5cF9dtI1JGHQ8JlukFDXD1axrRoTUfSj2E7Aew7qVADFpBGvsiHd8IsirLUPAXUQK/JqvBeJ2IBNodqM0PZkrLhyJnYv3FcYGv9HFtVi2jZSCP2Co6Mv/sd0f0u2om+u0WmQgz5Hlwmr16IpkWBKoa/VS9mvaQHAz=='
  33. #
  34. # }
  35. # params = {"id":"3CF8E83EF1FE4D299B2E44E83328B50D|1619496050955",
  36. # "metas":{"appName":"melody",
  37. # "appVersion":"4.4.0",
  38. # "ksid":"YZY0MZMTA1Mjc0Mzc5NjQzNTAxT1ZzaTNlKzlQ",
  39. # "shopId":173858169},
  40. # "service":"CustomerAnalysisService",
  41. # "method":"customerHierarchyData",
  42. # "params":{"req":{"shopId":173858169,
  43. # "timeEnum":"NEARLY_90_DAYS"}},
  44. # "ncp":"2.0.0"}
  45. # # resp = requests.post(url,headers=headers,json=params,).json()
  46. # # print(resp)
  47. # # highFreqCustomer30 = resp['result']['highFreqCustomer']['value']
  48. # # mediumFreqCustomer30 = resp['result']['mediumFreqCustomer']['value']
  49. # # lowFreqCustomer30 = resp['result']['lowFreqCustomer']['value']
  50. # # CustomerNum30days = int (highFreqCustomer30) + int (mediumFreqCustomer30) + int (lowFreqCustomer30)
  51. # # print(CustomerNum30days)
  52. # print(random.uniform(0.05,1))
  53. # url = 'https://api.dinghuo123.com/v2/oauth2/token'
  54. # params = {'userName':'jiangxiaobaixz',
  55. # 'password':'jiangxiaobaixz888',
  56. # 'client_id':'1111',
  57. # 'client_secret':'3sfX3HJX484gXYz',
  58. # 'grant_type':'client_credentials',
  59. # 'scope':'basic'}
  60. # headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
  61. # 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
  62. # resp = requests.get(url,params=params,headers=headers).text
  63. # print(resp)
  64. # from collections import Counter
  65. # a=['日期', '门店名称', '门店编号', '城市名称', '营业时长', '高峰期营业时长', '有效订单', '无效订单', '收入', '营业额', '支出', '顾客实付总额', '单均实付', '单均收入', '商品销售额', '餐盒费', '自配送费', '其他营业额', '抽佣', '活动补贴', '代金券补贴', '配送费补贴', '智能满减补贴', '智能满减服务费', '基础物流费', '其他服务费支出', '商户原因无效订单数', '曝光人数', '曝光次数', '进店次数', '进店人数', '进店转化率', '下单转化率', '新客下单转化率', '老客下单转化率', '参与活动数', '活动订单数', '活动订单占比', '满减活动订单数', '超会活动订单数', '配送活动订单数', '投入产出比', '活动总补贴', '饿了么补贴', '代理商补贴', '商家活动成本(含满减活动)', '商家活动成本(不含满减活动)', '营销力度(含满减活动)', '营销力度(不含满减活动)', '下单人数', '近7日复购人数', '近7日复购率', '近30日复购人数', '近30日复购率', '新客人数', '老客人数', '上架商品数', '有交易商品数', '库存不足商品数', '新上架商品数', '活动商品数', '差评订单数', '投诉订单数', '投诉订单id', '出餐超时订单数', '出餐超时订单id', '单均出餐时长', '催单数', '拒单数', '出餐宝扫码出餐订单数', '商责取消数', '商责超时数', '商责退单数', '商责催单数', '商责取消率', '商责超时率', '商责退单率', '商责催单率', '店铺评分', '满意度得分', '味道得分', '包装得分', '好评率', '好评数', '中评率', '中评数', '差评率', '差评数', '优质评价率', '优质评价数', '订单评价率', '订单评价数', '差评人工回复率']
  66. #
  67. # b=['日期', '门店名称', '城市名称', '营业时长', '高峰期营业时长', '门店编号','有效订单', '无效订单', '收入', '营业额', '支出', '顾客实付总额', '单均实付', '单均收入', '商品销售额', '餐盒费', '自配送费', '其他营业额', '抽佣', '活动补贴', '代金券补贴', '配送费补贴', '智能满减补贴', '智能满减服务费', '基础物流费', '其他服务费支出', '商户原因无效订单数', '曝光人数', '曝光次数', '进店次数', '进店人数', '进店转化率', '下单转化率', '新客下单转化率', '老客下单转化率', '参与活动数', '活动订单数', '活动订单占比', '满减活动订单数', '超会活动订单数', '配送活动订单数', '投入产出比', '活动总补贴', '饿了么补贴', '代理商补贴', '商家活动成本(含满减活动)', '商家活动成本(不含满减活动)', '营销力度(含满减活动)', '营销力度(不含满减活动)', '下单人数', '近7日复购人数', '近7日复购率', '近30日复购人数', '近30日复购率', '新客人数', '老客人数', '上架商品数', '有交易商品数', '库存不足商品数', '新上架商品数', '活动商品数', '差评订单数', '投诉订单数', '投诉订单id', '出餐超时订单数', '出餐超时订单id', '单均出餐时长', '催单数', '拒单数', '出餐宝扫码出餐订单数', '商责取消数', '商责超时数', '商责退单数', '商责催单数', '商责取消率', '商责超时率', '商责退单率', '商责催单率', '店铺评分', '满意度得分', '味道得分', '包装得分', '好评率', '好评数', '中评率', '中评数', '差评率', '差评数', '优质评价率', '优质评价数', '订单评价率', '订单评价数', '差评人工回复率']
  68. #
  69. # a = Counter(a)
  70. # print(a)
  71. # b = Counter(b)
  72. # print(b)
  73. # print (dict(a)==dict(b))
  74. # version = 'V234'
  75. # num = version.split('V')[1]
  76. # new_version = 'V'+str(int(num)+1)
  77. # print(new_version)
  78. if 1==0:
  79. timestamp = str(int(time.time()))
  80. random = str(random.randint(1,10000))
  81. corpId = 'wp4nJkEAAAXE4BbTWEYSdyrX_-fl8vqA'
  82. secret = 'fc092ce3365b4da39c682298f6b8684e'
  83. account = 'wo4nJkEAAARtG8M8Ig6n6r5eyQ1Vj1ow'
  84. headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36'}
  85. url_AccessKey = f'https://qiqiao.do1.com.cn/plus/cgi-bin/securities/access_key?timestamp={timestamp}&random={random}&corpId={corpId}&secret={secret}&account={account}'
  86. resp_AccessKey = requests.get(url_AccessKey,headers= headers).json()
  87. accessKey = resp_AccessKey['data']
  88. url_Token = f'https://qiqiao.do1.com.cn/plus/cgi-bin/securities/qiqiao_token?timestamp={timestamp}&random={random}&corpId={corpId}&secret={secret}&account={account}&accessKey={accessKey}'
  89. resp_Token = requests.get(url_Token,headers= headers).json()
  90. # print(requests.get(url_Token,headers= headers).text)
  91. Token = resp_Token['data']
  92. print(Token)
  93. #
  94. Token = '6812266045168435200'
  95. if 1 == 0:
  96. headers_api = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36',
  97. "Content-type": "application/json",
  98. "X-Auth0-Token": Token}
  99. applicationId = '62cfb64cb433f27040103cca' #测试
  100. formModelId = '630f1fb798c136661c4d03a6' #文件上传下载
  101. row_id = '6812277521321132032'
  102. shutil.rmtree (save_path)
  103. shutil.rmtree (out_path_forms)
  104. shutil.rmtree (out_path_goods)
  105. os.mkdir (save_path)
  106. os.mkdir (out_path_forms)
  107. os.mkdir (out_path_goods)
  108. url_select = f'https://qiqiao.do1.com.cn/plus/cgi-bin/open/applications/{applicationId}/forms/{formModelId}/{row_id}'
  109. resp_chaxun = requests.get(url_select, headers=headers_api).json()
  110. print(resp_chaxun)
  111. input_files = resp_chaxun['data']['variables']['输入文件']
  112. for file_info in input_files:
  113. fileId = file_info['fileId']
  114. name = str(file_info['name'])
  115. # url = f'https://qiqiao.do1.com.cn/plus/cgi-bin/open/file_download/applications/{applicationId}/form_models/{formModelId}/documents/{row_id}'
  116. # params = 'needDownloadFieldName=输入文件'
  117. # resp = requests.get(url,headers=headers_api,params=params).json()
  118. # fileId = resp['data']['fileId']
  119. # print(fileId)
  120. url_down = f'https://qiqiao.do1.com.cn/plus/cgi-bin/open/file_download/applications/{applicationId}/files/{fileId}'
  121. resp_down = requests.get(url_down,headers=headers_api).content
  122. save_path = r'C:\Users\ClownHe\Desktop\goods'
  123. with open (save_path + r'\%s' % str (name), mode='wb') as f:
  124. f.write (resp_down)
  125. print(name,'下载成功')
  126. # file_name = r'C:\Users\ClownHe\Desktop\goods\6812277521321132032\elmsave'
  127. #
  128. # for a, b, files in os.walk(file_name, topdown=False):
  129. # print(len(files),a,b)
  130. # if len(files) == 0:
  131. # ...
  132. # else:
  133. # for file in files:
  134. # print(file)
  135. # a= ['1','0','1']
  136. # a = []
  137. # from chardet.universaldetector import UniversalDetector
  138. # file_name = 'C:/Users/ClownHe/Desktop/goods/test'
  139. # for a, b, files in os.walk(file_name, topdown=False):
  140. # for file in files:
  141. # with open (file_name + '/' + file, 'rb') as f:
  142. # detector = UniversalDetector ()
  143. # for line in f.readlines ():
  144. # print(line)
  145. # detector.feed (line)
  146. # if detector.done:
  147. # break
  148. # # detector.close ()
  149. # # line = f.readlines ()[0]
  150. # # print(line)
  151. # # detector.feed (line)
  152. # # detector.close()
  153. # en = detector.result['encoding']
  154. # print(en)
  155. # import os
  156. # from chardet.universaldetector import UniversalDetector
  157. #
  158. # def get_filelist(path):
  159. # """
  160. # 获取路径下所有csv文件的路径列表
  161. # """
  162. # Filelist = []
  163. # for home, dirs, files in os.walk(path):
  164. # for filename in files:
  165. # if ".csv" in filename:
  166. # Filelist.append(os.path.join(home, filename))
  167. # return Filelist
  168. #
  169. # def read_file(file):
  170. # """
  171. # 逐个读取文件的内容
  172. # """
  173. # with open(file, 'rb') as f:
  174. # return f.read()
  175. #
  176. # def get_encode_info(file):
  177. # """
  178. # 逐个读取文件的编码方式
  179. # """
  180. # with open(file, 'rb') as f:
  181. # detector = UniversalDetector()
  182. # for line in f.readlines():
  183. # detector.feed(line)
  184. # if detector.done:
  185. # break
  186. # detector.close()
  187. # return detector.result['encoding']
  188. #
  189. # def convert_encode2utf8(file, original_encode, des_encode):
  190. # """
  191. # 将文件的编码方式转换为utf-8,并写入原先的文件中。
  192. # """
  193. # file_content = read_file(file)
  194. # file_decode = file_content.decode(original_encode, 'ignore')
  195. # file_encode = file_decode.encode(des_encode)
  196. # with open(file, 'wb') as f:
  197. # f.write(file_encode)
  198. #
  199. # def read_and_convert(path):
  200. # """
  201. # 读取文件并转换
  202. # """
  203. # Filelist = get_filelist(path=path)
  204. # fileNum= 0
  205. # for filename in Filelist:
  206. # try:
  207. # file_content = read_file(filename)
  208. # encode_info = get_encode_info(filename)
  209. # if encode_info != 'utf-8':
  210. # fileNum +=1
  211. # convert_encode2utf8(filename, encode_info, 'utf-8')
  212. # print('成功转换 %s 个文件 %s '%(fileNum,filename))
  213. # except BaseException:
  214. # print(filename,'存在问题,请检查!')
  215. #
  216. # def recheck_again(path):
  217. # """
  218. # 再次判断文件是否为utf-8
  219. # """
  220. # print('---------------------以下文件仍存在问题---------------------')
  221. # Filelist = get_filelist(path)
  222. # for filename in Filelist:
  223. # encode_info_ch = get_encode_info(filename)
  224. # if encode_info_ch != 'utf-8':
  225. # print(filename,'的编码方式是:',encode_info_ch)
  226. #
  227. # print('--------------------------检查结束--------------------------')
  228. # if __name__ == "__main__":
  229. # """
  230. # 输入文件路径
  231. # """
  232. # path = 'C:/Users/ClownHe/Desktop/goods/test'
  233. # read_and_convert(path)
  234. # recheck_again(path)
  235. # print('转换结束!')
  236. # for i in range(60):
  237. # print(i)
  238. # time.sleep (0.5)
  239. # url = 'http://fg94wp.natappfree.cc/'
  240. # name = f'这是mypc{i}'
  241. # json = {'name':name}
  242. # re = requests.post(url,json=json).text
  243. # print(re)
  244. # time.sleep(5)
  245. # for i in range(60):
  246. # a = 59+i
  247. # print(a)
  248. # url = 'http://fg94wp.natappfree.cc/'
  249. # name = f'这是{a}'
  250. # json = {'name':name}
  251. # requests.post(url,json=json)
  252. # re1 = requests.post (url, json=json).text
  253. # print (re1)
  254. # time.sleep(3)
  255. # for i in range(60):
  256. # b= 119 + i
  257. # print(b)
  258. # url = 'http://fg94wp.natappfree.cc/'
  259. # name = f'这是{b}'
  260. # json = {'name':name}
  261. # requests.post(url,json=json)
  262. # a = time.time()
  263. # for i in range(1000000000):
  264. # ...
  265. # print(time.time()-a)
  266. # a = [1,2,3,4,4,""]
  267. # for b in a:
  268. # if b not in [4,""]:
  269. # print(b)
  270. #
  271. # a = {'data':{'a':''}}
  272. # a['data']['c'] = 1
  273. # import json
  274. # print(json.dumps(a,ensure_ascii=False))
  275. a = [1,2,3,4,5,6,7,8,9,0]
  276. # n = len(a)
  277. # cnt = 3
  278. #
  279. # s = n//cnt
  280. # r = n%cnt
  281. # if r >0:
  282. # s = s+1
  283. # out = []
  284. # for i in range(s):
  285. # b = a[(i)*cnt:(i+1)*cnt]
  286. # out.append(b)
  287. # print(out)
  288. # a_resp = []
  289. # b_sql = [2]
  290. # update = list(set(a_resp).intersection(set(b_sql)))
  291. # insert = list(set(a_resp).difference(set(b_sql)))
  292. # delete = list(set(b_sql).difference(set(a_resp)))
  293. # print(update,insert,delete)
  294. #
  295. #
  296. # c = ['肠粉1','肠粉2','肠粉3','肠粉4','肠粉5']
  297. # a = ','.join(c)
  298. # print(a)
  299. # a = 'a'
  300. # b = a.split(',')
  301. # print(b)
  302. if 1==0:
  303. c = [1,2,3,0,5,6,7,9]
  304. for i in c:
  305. try:
  306. b = 1/i
  307. except Exception as e:
  308. print(e)
  309. continue
  310. print(i,b)
  311. import functools
  312. def retry(func):
  313. @functools.wraps(func)
  314. def wrapper(*args, **kwargs):
  315. while True:
  316. try:
  317. return func(*args, **kwargs)
  318. except Exception as e:
  319. print(f"Error occurred: {e}")
  320. print("Retrying...")
  321. return wrapper
  322. @retry
  323. def my_function(b):
  324. a = 1/b
  325. return a
  326. # my_function(0)
  327. def aa(func):
  328. def b(*args,**kwargs):
  329. n = 0
  330. while n < 3:
  331. try:
  332. func (*args, **kwargs)
  333. d = 'yes'
  334. n = 4
  335. except:
  336. d = 'no'
  337. n += 1
  338. print (d,n)
  339. return b
  340. # @aa
  341. # def c(a):
  342. # c = 1/a
  343. # print(f'结果{c}')
  344. #
  345. # for i in [1,0,9,0,4]:
  346. # print(f'这是{i}')
  347. # c(i)