diff --git a/doc/更新日志.md b/doc/更新日志.md
index c49789e..5aff2c6 100644
--- a/doc/更新日志.md
+++ b/doc/更新日志.md
@@ -1,6 +1,7 @@
 ###### 2023/11/18
 - [X] 修复腾云驾雾
 - [X] 增加手动升级功能,手动上传升级文件.zip后进行强制升级即可
+- [X] 在线升级增加后端的下载进度打印。9001可查看
 
 ###### 2023/10/04
 - [X] 关于rsa加解密的研究,js模块导入方式研究失败。请用蜜蜂壳子注入的函数实现
diff --git a/js/version.txt b/js/version.txt
index 13312d7..4fb7105 100644
--- a/js/version.txt
+++ b/js/version.txt
@@ -1 +1 @@
-3.9.48beta24
\ No newline at end of file
+3.9.48beta25
\ No newline at end of file
diff --git a/utils/download_progress.py b/utils/download_progress.py
index 99e23bf..64bf729 100644
--- a/utils/download_progress.py
+++ b/utils/download_progress.py
@@ -7,10 +7,11 @@
 import os
 import time
-import logging
+# import logging
 import requests
 from urllib.parse import unquote
 from contextlib import closing
+from log import logger
 
 
 chunkSize = 1024 * 1024
 loop = 5
@@ -27,7 +28,8 @@ def speed_handle(process, file_length):
                                            '□' * round((1 - num) * 20))
     else:
         progress = ' \033[1;33m{}\033[0m% |{}|'.format(100, '■' * 50)
-    print(progress, flush=True, end='')
+    # print(progress, flush=True, end='')
+    logger.info(progress)
 
 
 def get_file_name(url, headers):
@@ -47,8 +49,11 @@ def get_file_name(url, headers):
 
 
 def file_download(fileUrl, filePath):
-    response = requests.get(fileUrl, headers=headers, stream=True)
+    if os.path.exists(filePath):
+        os.remove(filePath)
+    response = requests.get(fileUrl, headers=headers, stream=True, verify=False)
     fileSize = int(response.headers['content-length'])  # 文件大小
+    logger.info(f'fileSize:{fileSize}')
 
     tmpSize = 0
     n = 0
@@ -71,24 +76,34 @@ def file_download(fileUrl, filePath):
 
 
         if remainSize > 0:
-            with closing(requests.get(fileUrl, headers=_headers, stream=True)) as _response, open(filePath,
-                                                                                                  "ab") as file:
+            with closing(requests.get(fileUrl, headers=_headers, stream=True, verify=False)) as _response, open(
+                    filePath,
+                    "ab") as file:
                 for content in _response.iter_content(chunk_size=chunkSize):
                     file.write(content)
                     timeTook = time.perf_counter() - st
                     contentSize += len(content) / chunkSize
-                    print('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename), flush=True, end='')
+                    # print('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename), flush=True, end='')
+                    # logger.info('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename))
+                    logger.info(f'文件{filename}下载中...')
                     speed_handle(contentSize + tmpSize / chunkSize, fileSize / chunkSize)
 
                     downloadSpeed = contentSize / timeTook  # 平均下载速度
                     remainingTime = int(timeTook / (contentSize / remainSize) - timeTook)  # 估计剩余下载时间
 
-                    print(
+                    # print(
+                    #     '[' + 'average speed: \033[1;31m{:.2f}MiB/s\033[0m, remaining time: \033[1;32m{}s\033[0m, file size: \033[1;34m{:.2f}MiB\033[0m'.format(
+                    #         downloadSpeed,
+                    #         remainingTime,
+                    #         fileSize / chunkSize) + ']', flush=True, end=' '
+                    # )
+
+                    logger.info(
                         '[' + 'average speed: \033[1;31m{:.2f}MiB/s\033[0m, remaining time: \033[1;32m{}s\033[0m, file size: \033[1;34m{:.2f}MiB\033[0m'.format(
                             downloadSpeed,
                             remainingTime,
-                            fileSize / chunkSize) + ']', flush=True, end=' '
-                    )
+                            fileSize / chunkSize) + ']'
+                    )
         else:
             isDownloaded = True
             break
@@ -98,49 +113,62 @@ def file_download(fileUrl, filePath):
     return isDownloaded
 
 
-if __name__ == '__main__':
+def file_downloads(files, save_path='download'):
+    """
+    files = [{'url':'https://ghproxy.liuzhicong.com/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip','name':'dr_py.zip'}]
+    :param save_path:
+    :param files:
+    :return:
+    """
+    # save_path = 'tmp'
+    os.makedirs(save_path, exist_ok=True)
-    urlTxt = './url.txt'
-
-    pathSave = '/data2/sam_down'
-    os.makedirs(pathSave, exist_ok=True)
-
-    logging.basicConfig(level=logging.INFO, filename='downloading.log', filemode='a', format="%(message)s")
+    # logging.basicConfig(level=logging.INFO, filename='download/downloading.log', filemode='a', format="%(message)s")
     localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    logging.info(localtime + ': Start downloading task: {}'.format(urlTxt))
+    logger.info(localtime + ': Start downloading task: {}'.format(files))
 
     failedUrl = []
 
-    with open(urlTxt, "r") as f:
-        fileUrls = [line.strip() for line in f.readlines()]
+    for cnt, file in enumerate(files):
+        fileUrl = file.get('url')
+        if not fileUrl:
+            print('file error:no url')
+            continue
+        fileName = file.get('name')
+        filename = fileName or get_file_name(fileUrl, headers)  # 获取文件名称
+        logger.info(f'开始下载{filename}: {fileUrl}')
+        try:
+            t0 = time.perf_counter()
+            isDload = file_download(fileUrl, os.path.join(save_path, filename))
+            t1 = time.perf_counter()
+            localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
 
-    for cnt, fileUrl in enumerate(fileUrls):
-
-        filename = get_file_name(fileUrl, headers)  # 获取文件名称
-
-        try:
-            t0 = time.perf_counter()
-            isDload = file_download(fileUrl, os.path.join(pathSave, filename))
-            t1 = time.perf_counter()
-            localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-
-            if isDload:
-                logging.info(
-                    localtime + ': {} download successfully! Time consuming: {:.3f}s'.format(filename, t1 - t0))
-            else:
-                # os.remove(os.path.join(pathSave, filename))
-                logging.info(localtime + ': {} download failed! Url: {}'.format(filename, fileUrl))
-                failedUrl.append(fileUrl)
-
-        except:
+            if isDload:
+                logger.info(
+                    localtime + ': {} download successfully! Time consuming: {:.3f}s'.format(filename, t1 - t0))
+            else:
+                logger.info(localtime + ': {} download failed! Url: {}'.format(filename, fileUrl))
                 failedUrl.append(fileUrl)
 
-    if len(failedUrl):
+        except:
+            failedUrl.append(fileUrl)
 
-        with open('failedUrl.txt', 'w') as p:
+    if len(failedUrl):
+        with open(os.path.join(save_path, 'failedUrl.txt'), 'w') as p:
             for url in failedUrl:
                 p.write(url + '\n')
 
     fn = len(failedUrl)
-    sn = len(fileUrls) - fn
-    print(
-        "\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
\ No newline at end of file
+    sn = len(files) - fn
+    # print("\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+    logger.info(
+        "\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+
+
+if __name__ == '__main__':
+    # urlTxt = 'download/urls.txt'
+    # with open(urlTxt, "r") as f:
+    #     fileUrls = [line.strip() for line in f.readlines()]
+
+    files = [{'url': 'https://ghproxy.liuzhicong.com/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip',
+              'name': 'dr_py.zip'}]
+    file_downloads(files, 'tmp')
diff --git a/utils/update.py b/utils/update.py
index 9abd51f..04694aa 100644
--- a/utils/update.py
+++ b/utils/update.py
@@ -9,66 +9,75 @@
 import sys
 import requests
 import os
 import zipfile
-import shutil # https://blog.csdn.net/weixin_33130113/article/details/112336581
+import shutil  # https://blog.csdn.net/weixin_33130113/article/details/112336581
 from utils.log import logger
+from utils.download_progress import file_downloads
 from utils.web import get_interval
 from utils.htmlParser import jsoup
 import ujson
 
 headers = {
-    'Referer': 'https://gitcode.net/',
-    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
+    'Referer': 'https://gitcode.net/',
+    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
 }
 
-def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg',size=0):
+
+def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg', size=0):
     jsp = jsoup(url)
     pdfh = jsp.pdfh
     pdfa = jsp.pdfa
     pd = jsp.pd
     try:
-        r = requests.get(url,headers=headers,timeout=2)
+        r = requests.get(url, headers=headers, timeout=2)
         html = r.text
-        data = pdfa(html,'ul.hot-list&&li')
-        suggs = [{'title':pdfh(dt,'a&&Text'),'url':pd(dt,'a&&href')} for dt in data]
+        data = pdfa(html, 'ul.hot-list&&li')
+        suggs = [{'title': pdfh(dt, 'a&&Text'), 'url': pd(dt, 'a&&href')} for dt in data]
         # print(html)
         # print(suggs)
         return suggs
     except:
        return []
 
-def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',size=0):
+
+def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',
+                   size=0):
     size = int(size) if size else 50
-    pdata = ujson.dumps({"pageNum":0,"pageSize":size})
+    pdata = ujson.dumps({"pageNum": 0, "pageSize": size})
     try:
-        r = requests.post(url,headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36', 'content-type': 'application/json'},data=pdata,timeout=2)
+        r = requests.post(url, headers={
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
+            'content-type': 'application/json'}, data=pdata, timeout=2)
         html = r.json()
         # print(html)
         data = html['data']['navItemList'][0]['hotRankResult']['rankItemList']
-        suggs = [{'title':dt['title'],'url':dt['url']} for dt in data]
+        suggs = [{'title': dt['title'], 'url': dt['url']} for dt in data]
         # print(html)
         # print(suggs)
         return suggs
     except:
        return []
 
-def getHotSuggest(s_from,size):
+
+def getHotSuggest(s_from, size):
     if s_from == 'sougou':
         return getHotSuggest1(size=size)
     else:
         return getHotSuggest2(size=size)
 
+
 def getLocalVer():
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # 上级目录
     version_path = os.path.join(base_path, f'js/version.txt')
     if not os.path.exists(version_path):
-        with open(version_path,mode='w+',encoding='utf-8') as f:
+        with open(version_path, mode='w+', encoding='utf-8') as f:
             version = '1.0.0'
             f.write(version)
     else:
-        with open(version_path,encoding='utf-8') as f:
+        with open(version_path, encoding='utf-8') as f:
             version = f.read()
     return version
 
+
 def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
     ver = '1.0.1'
     msg = ''
@@ -79,17 +88,18 @@ def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
         # r = requests.get('https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/raw/master/js/version.txt',timeout=(2,2))
         url = f'{update_proxy}https://raw.githubusercontent.com/hjdhnx/dr_py/main/js/version.txt'
         logger.info(f'开始检查线上版本号:{url}')
-        r = requests.get(url,headers=headers,timeout=(2,2),verify=False)
+        r = requests.get(url, headers=headers, timeout=(2, 2), verify=False)
         ver = r.text
     except Exception as e:
         # print(f'{e}')
         msg = f'{e}'
         logger.info(msg)
-    return ver,msg
+    return ver, msg
+
 
 def checkUpdate():
     local_ver = getLocalVer()
-    online_ver,msg = getOnlineVer()
+    online_ver, msg = getOnlineVer()
     if local_ver != online_ver:
         return True
     return False
@@ -112,6 +122,7 @@ def del_file(filepath):
         except Exception as e:
             logger.info(f'删除{file_path}发生错误:{e}')
 
+
 def copytree(src, dst, ignore=None):
     if ignore is None:
         ignore = []
@@ -123,11 +134,11 @@ def copytree(src, dst, ignore=None):
         if os.path.isdir(from_dir):  # 判断是否为文件夹
             if not os.path.exists(to_dir):  # 判断目标文件夹是否存在,不存在则创建
                 os.mkdir(to_dir)
-            copytree(from_dir, to_dir,ignore)  # 迭代 遍历子文件夹并复制文件
+            copytree(from_dir, to_dir, ignore)  # 迭代 遍历子文件夹并复制文件
         elif os.path.isfile(from_dir):  # 如果为文件,则直接复制文件
             if ignore:
-                regxp = '|'.join(ignore).replace('\\','/')  # 组装正则
-                to_dir_str = str(to_dir).replace('\\','/')
+                regxp = '|'.join(ignore).replace('\\', '/')  # 组装正则
+                to_dir_str = str(to_dir).replace('\\', '/')
                 if not re.search(rf'{regxp}', to_dir_str, re.M):
                     shutil.copy(from_dir, to_dir)  # 复制文件
             else:
@@ -140,19 +151,20 @@ def force_copy_files(from_path, to_path, exclude_files=None):
         exclude_files = []
     logger.info(f'开始拷贝文件{from_path}=>{to_path}')
     if not os.path.exists(to_path):
-        os.makedirs(to_path,exist_ok=True)
+        os.makedirs(to_path, exist_ok=True)
 
     try:
         if sys.version_info < (3, 8):
-            copytree(from_path, to_path,exclude_files)
+            copytree(from_path, to_path, exclude_files)
         else:
             if len(exclude_files) > 0:
-                shutil.copytree(from_path, to_path, dirs_exist_ok=True,ignore=shutil.ignore_patterns(*exclude_files))
+                shutil.copytree(from_path, to_path, dirs_exist_ok=True, ignore=shutil.ignore_patterns(*exclude_files))
             else:
                 shutil.copytree(from_path, to_path, dirs_exist_ok=True)
     except Exception as e:
         logger.info(f'拷贝文件{from_path}=>{to_path}发生错误:{e}')
 
+
 def copy_to_update():
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # 上级目录
     tmp_path = os.path.join(base_path, f'tmp')
@@ -173,10 +185,10 @@
         logger.info(f'升级过程中清理jsd文件发生错误:{e}')
 
     # 千万不能覆盖super,base
-    paths = ['js','models','controllers','libs','static','templates','utils','txt','jiexi','py','whl','doc']
-    exclude_files = ['txt/pycms0.json','txt/pycms1.json','txt/pycms2.json','base/rules.db']
+    paths = ['js', 'models', 'controllers', 'libs', 'static', 'templates', 'utils', 'txt', 'jiexi', 'py', 'whl', 'doc']
+    exclude_files = ['txt/pycms0.json', 'txt/pycms1.json', 'txt/pycms2.json', 'base/rules.db']
     for path in paths:
-        force_copy_files(os.path.join(dr_path, path), os.path.join(base_path, path),exclude_files)
+        force_copy_files(os.path.join(dr_path, path), os.path.join(base_path, path), exclude_files)
     try:
         shutil.copy(os.path.join(dr_path, 'app.py'), os.path.join(base_path, 'app.py'))  # 复制文件
         shutil.copy(os.path.join(dr_path, 'requirements.txt'), os.path.join(base_path, 'requirements.txt'))  # 复制文件
@@ -185,13 +197,14 @@
     logger.info(f'升级程序执行完毕,全部文件已拷贝覆盖')
     return True
 
+
 def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     update_proxy = (update_proxy or '').strip()
     logger.info(f'update_proxy:{update_proxy}')
     t1 = getTime()
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # 上级目录
     tmp_path = os.path.join(base_path, f'tmp')
-    os.makedirs(tmp_path,exist_ok=True)
+    os.makedirs(tmp_path, exist_ok=True)
     # url = 'https://gitcode.net/qq_32394351/dr_py/-/archive/master/dr_py-master.zip'
     # url = 'https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/archive/master.zip'
     url = f'{update_proxy}https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip'
@@ -203,13 +216,18 @@
     try:
         # print(f'开始下载:{url}')
         logger.info(f'开始下载:{url}')
-        r = requests.get(url,headers=headers,timeout=(20,20),verify=False)
-        rb = r.content
         download_path = os.path.join(tmp_path, 'dr_py.zip')
-        # 保存文件前清空目录
-        del_file(tmp_path)
-        with open(download_path,mode='wb+') as f:
-            f.write(rb)
+
+        # r = requests.get(url, headers=headers, timeout=(20, 20), verify=False)
+        # rb = r.content
+        # # 保存文件前清空目录
+        # del_file(tmp_path)
+        # with open(download_path,mode='wb+') as f:
+        #     f.write(rb)
+
+        # 2023/11/18 改为带进度条的下载
+        file_downloads([{'url': url, 'name': 'dr_py.zip'}], tmp_path)
+
         # print(f'开始解压文件:{download_path}')
         logger.info(f'开始解压文件:{download_path}')
         f = zipfile.ZipFile(download_path, 'r')  # 压缩文件位置
@@ -227,22 +245,23 @@
     logger.info(f'系统升级共计耗时:{get_interval(t1)}毫秒')
     return msg
 
-def download_lives(live_url:str):
+
+def download_lives(live_url: str):
     t1 = getTime()
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # 上级目录
     live_path = os.path.join(base_path, f'base/直播.txt')
     logger.info(f'尝试同步{live_url}远程内容到{live_path}')
     try:
-        r = requests.get(live_url,headers=headers,timeout=3)
+        r = requests.get(live_url, headers=headers, timeout=3)
         auto_encoding = r.apparent_encoding
-        if auto_encoding.lower() in ['utf-8','gbk','bg2312','gb18030']:
+        if auto_encoding.lower() in ['utf-8', 'gbk', 'bg2312', 'gb18030']:
             r.encoding = auto_encoding
         # print(r.encoding)
         html = r.text
         # print(len(html))
-        if re.search('cctv|.m3u8',html,re.M|re.I) and len(html) > 1000:
+        if re.search('cctv|.m3u8', html, re.M | re.I) and len(html) > 1000:
             logger.info(f'直播源同步成功,耗时{get_interval(t1)}毫秒')
-            with open(live_path,mode='w+',encoding='utf-8') as f:
+            with open(live_path, mode='w+', encoding='utf-8') as f:
                 f.write(html)
             return True
         else:
@@ -250,4 +269,4 @@ def download_lives(live_url:str):
             return False
     except Exception as e:
         logger.info(f'直播源同步失败,耗时{get_interval(t1)}毫秒\n{e}')
-        return False
\ No newline at end of file
+        return False
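Usage sketch (not applied by this patch): the new download path in update.py reduces to a single call into utils/download_progress.py. This assumes it runs inside the dr_py project so the package's logger imports resolve; the proxy prefix and the 'tmp' directory simply mirror the defaults visible in download_new_version().

    # Sketch only: mirrors the call added in download_new_version().
    from utils.download_progress import file_downloads

    update_proxy = 'https://ghproxy.liuzhicong.com/'
    url = f'{update_proxy}https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip'

    # Each entry needs a 'url'; 'name' is optional and falls back to get_file_name().
    # Progress and average-speed lines go through the logger (viewable via 9001, per the changelog entry).
    file_downloads([{'url': url, 'name': 'dr_py.zip'}], 'tmp')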