Print the download progress bar on the backend

hjdhnx 2023-11-18 15:46:54 +08:00
parent da1ecbabcd
commit ed9df94742
4 changed files with 131 additions and 83 deletions

View File

@@ -1,6 +1,7 @@
 ###### 2023/11/18
 - [X] Fixed 腾云驾雾
 - [X] Added a manual upgrade feature: upload the upgrade .zip by hand, then force the upgrade
+- [X] Online upgrade now prints download progress on the backend; viewable on 9001
 ###### 2023/10/04
 - [X] Research on rsa encryption/decryption: importing it as a js module failed. Implement it with the functions injected by the 蜜蜂 app shell

View File

@@ -1 +1 @@
-3.9.48beta24
+3.9.48beta25

View File

@@ -7,10 +7,11 @@
 import os
 import time
-import logging
+# import logging
 import requests
 from urllib.parse import unquote
 from contextlib import closing
+from log import logger
 
 
 chunkSize = 1024 * 1024
 loop = 5
@@ -27,7 +28,8 @@ def speed_handle(process, file_length):
                                '' * round((1 - num) * 20))
     else:
         progress = ' \033[1;33m{}\033[0m% |{}|'.format(100, '' * 50)
-    print(progress, flush=True, end='')
+    # print(progress, flush=True, end='')
+    logger.info(progress)
 
 
 def get_file_name(url, headers):
@@ -47,8 +49,11 @@ def get_file_name(url, headers):
 
 
 def file_download(fileUrl, filePath):
-    response = requests.get(fileUrl, headers=headers, stream=True)
+    if os.path.exists(filePath):
+        os.remove(filePath)
+    response = requests.get(fileUrl, headers=headers, stream=True, verify=False)
     fileSize = int(response.headers['content-length'])  # file size
+    logger.info(f'fileSize:{fileSize}')
     tmpSize = 0
     n = 0
 
@@ -71,23 +76,33 @@ def file_download(fileUrl, filePath):
         if remainSize > 0:
-            with closing(requests.get(fileUrl, headers=_headers, stream=True)) as _response, open(filePath,
+            with closing(requests.get(fileUrl, headers=_headers, stream=True, verify=False)) as _response, open(
+                    filePath,
                     "ab") as file:
                 for content in _response.iter_content(chunk_size=chunkSize):
                     file.write(content)
                     timeTook = time.perf_counter() - st
                     contentSize += len(content) / chunkSize
-                    print('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename), flush=True, end='')
+                    # print('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename), flush=True, end='')
+                    # logger.info('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename))
+                    logger.info(f'文件{filename}下载中...')
                     speed_handle(contentSize + tmpSize / chunkSize, fileSize / chunkSize)
                     downloadSpeed = contentSize / timeTook  # average download speed
                     remainingTime = int(timeTook / (contentSize / remainSize) - timeTook)  # estimated remaining download time
-                    print(
+                    # print(
+                    #     '[' + 'average speed: \033[1;31m{:.2f}MiB/s\033[0m, remaining time: \033[1;32m{}s\033[0m, file size: \033[1;34m{:.2f}MiB\033[0m'.format(
+                    #         downloadSpeed,
+                    #         remainingTime,
+                    #         fileSize / chunkSize) + ']', flush=True, end=' '
+                    # )
+                    logger.info(
                         '[' + 'average speed: \033[1;31m{:.2f}MiB/s\033[0m, remaining time: \033[1;32m{}s\033[0m, file size: \033[1;34m{:.2f}MiB\033[0m'.format(
                             downloadSpeed,
                             remainingTime,
-                            fileSize / chunkSize) + ']', flush=True, end=' '
+                            fileSize / chunkSize) + ']'
                     )
         else:
             isDownloaded = True
@@ -98,49 +113,62 @@ def file_download(fileUrl, filePath):
     return isDownloaded
 
 
-if __name__ == '__main__':
-    urlTxt = './url.txt'
-    pathSave = '/data2/sam_down'
-    os.makedirs(pathSave, exist_ok=True)
-    logging.basicConfig(level=logging.INFO, filename='downloading.log', filemode='a', format="%(message)s")
+def file_downloads(files, save_path='download'):
+    """
+    files = [{'url':'https://ghproxy.liuzhicong.com/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip','name':'dr_py.zip'}]
+    :param save_path:
+    :param files:
+    :return:
+    """
+    # save_path = 'tmp'
+    os.makedirs(save_path, exist_ok=True)
+    # logging.basicConfig(level=logging.INFO, filename='download/downloading.log', filemode='a', format="%(message)s")
     localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    logging.info(localtime + ': Start downloading task: {}'.format(urlTxt))
+    logger.info(localtime + ': Start downloading task: {}'.format(files))
     failedUrl = []
-    with open(urlTxt, "r") as f:
-        fileUrls = [line.strip() for line in f.readlines()]
-
-    for cnt, fileUrl in enumerate(fileUrls):
-        filename = get_file_name(fileUrl, headers)  # get the file name
+    for cnt, file in enumerate(files):
+        fileUrl = file.get('url')
+        if not fileUrl:
+            print('file error:no url')
+            continue
+        fileName = file.get('name')
+        filename = fileName or get_file_name(fileUrl, headers)  # get the file name
+        logger.info(f'开始下载{filename}: {fileUrl}')
         try:
             t0 = time.perf_counter()
-            isDload = file_download(fileUrl, os.path.join(pathSave, filename))
+            isDload = file_download(fileUrl, os.path.join(save_path, filename))
             t1 = time.perf_counter()
             localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
             if isDload:
-                logging.info(
+                logger.info(
                     localtime + ': {} download successfully! Time consuming: {:.3f}s'.format(filename, t1 - t0))
             else:
-                # os.remove(os.path.join(pathSave, filename))
-                logging.info(localtime + ': {} download failed! Url: {}'.format(filename, fileUrl))
+                logger.info(localtime + ': {} download failed! Url: {}'.format(filename, fileUrl))
                 failedUrl.append(fileUrl)
         except:
             failedUrl.append(fileUrl)
     if len(failedUrl):
-        with open('failedUrl.txt', 'w') as p:
+        with open(os.path.join(save_path, 'failedUrl.txt'), 'w') as p:
             for url in failedUrl:
                 p.write(url + '\n')
     fn = len(failedUrl)
-    sn = len(fileUrls) - fn
-    print(
+    sn = len(files) - fn
+    # print("\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+    logger.info(
         "\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+
+
+if __name__ == '__main__':
+    # urlTxt = 'download/urls.txt'
+    # with open(urlTxt, "r") as f:
+    #     fileUrls = [line.strip() for line in f.readlines()]
+    files = [{'url': 'https://ghproxy.liuzhicong.com/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip',
+              'name': 'dr_py.zip'}]
+    file_downloads(files, 'tmp')

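For reference, the new code path streams the archive with requests and reports progress through a logger instead of print, so the progress lines land in the backend log (the changelog notes they can be watched on 9001). Below is a minimal standalone sketch of that pattern, with hypothetical names (`fetch_with_progress`, `log`) rather than this module's actual API:

```python
# Sketch only: streamed download with logger-based progress, mirroring the
# approach in this file. Names and config below are illustrative assumptions.
import logging

import requests

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger("download")

CHUNK = 1024 * 1024  # 1 MiB, the same chunk size the module uses


def fetch_with_progress(url: str, path: str) -> None:
    with requests.get(url, stream=True, timeout=20) as resp:
        resp.raise_for_status()
        total = int(resp.headers.get("content-length", 0))
        done = 0
        with open(path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=CHUNK):
                f.write(chunk)
                done += len(chunk)
                if total:
                    # each update is a full log line, so a web log viewer can
                    # tail it; a print()-style \r bar would be invisible there
                    log.info("%s: %d%% (%.2f/%.2f MiB)", path,
                             done * 100 // total, done / CHUNK, total / CHUNK)
```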
View File

@@ -11,6 +11,7 @@ import os
 import zipfile
 import shutil  # https://blog.csdn.net/weixin_33130113/article/details/112336581
 from utils.log import logger
+from utils.download_progress import file_downloads
 from utils.web import get_interval
 from utils.htmlParser import jsoup
 import ujson
@@ -20,6 +21,7 @@ headers = {
     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
 }
 
+
 def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg', size=0):
     jsp = jsoup(url)
     pdfh = jsp.pdfh
@@ -36,11 +38,15 @@ def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg',size=0):
     except:
         return []
 
-def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',size=0):
+
+def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',
+                   size=0):
     size = int(size) if size else 50
     pdata = ujson.dumps({"pageNum": 0, "pageSize": size})
     try:
-        r = requests.post(url,headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36', 'content-type': 'application/json'},data=pdata,timeout=2)
+        r = requests.post(url, headers={
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
+            'content-type': 'application/json'}, data=pdata, timeout=2)
         html = r.json()
         # print(html)
         data = html['data']['navItemList'][0]['hotRankResult']['rankItemList']
@@ -51,12 +57,14 @@ def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.
     except:
         return []
 
+
 def getHotSuggest(s_from, size):
     if s_from == 'sougou':
         return getHotSuggest1(size=size)
     else:
         return getHotSuggest2(size=size)
 
+
 def getLocalVer():
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
     version_path = os.path.join(base_path, f'js/version.txt')
@@ -69,6 +77,7 @@ def getLocalVer():
         version = f.read()
     return version
 
+
 def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
     ver = '1.0.1'
     msg = ''
@@ -87,6 +96,7 @@ def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
     logger.info(msg)
     return ver, msg
 
+
 def checkUpdate():
     local_ver = getLocalVer()
     online_ver, msg = getOnlineVer()
@@ -112,6 +122,7 @@ def del_file(filepath):
     except Exception as e:
         logger.info(f'删除{file_path}发生错误:{e}')
 
+
 def copytree(src, dst, ignore=None):
     if ignore is None:
         ignore = []
@@ -153,6 +164,7 @@ def force_copy_files(from_path, to_path, exclude_files=None):
     except Exception as e:
         logger.info(f'拷贝文件{from_path}=>{to_path}发生错误:{e}')
 
+
 def copy_to_update():
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
     tmp_path = os.path.join(base_path, f'tmp')
@@ -185,6 +197,7 @@ def copy_to_update():
     logger.info(f'升级程序执行完毕,全部文件已拷贝覆盖')
     return True
 
+
 def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     update_proxy = (update_proxy or '').strip()
     logger.info(f'update_proxy:{update_proxy}')
@@ -203,13 +216,18 @@ def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     try:
         # print(f'开始下载:{url}')
         logger.info(f'开始下载:{url}')
-        r = requests.get(url,headers=headers,timeout=(20,20),verify=False)
-        rb = r.content
         download_path = os.path.join(tmp_path, 'dr_py.zip')
-        # clear the directory before saving the file
-        del_file(tmp_path)
-        with open(download_path,mode='wb+') as f:
-            f.write(rb)
+
+        # r = requests.get(url, headers=headers, timeout=(20, 20), verify=False)
+        # rb = r.content
+        # # clear the directory before saving the file
+        # del_file(tmp_path)
+        # with open(download_path,mode='wb+') as f:
+        #     f.write(rb)
+
+        # 2023/11/18 switched to a download with a progress bar
+        file_downloads([{'url': url, 'name': 'dr_py.zip'}], tmp_path)
+
         # print(f'开始解压文件:{download_path}')
         logger.info(f'开始解压文件:{download_path}')
         f = zipfile.ZipFile(download_path, 'r')  # location of the zip archive
@@ -227,6 +245,7 @@ def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     logger.info(f'系统升级共计耗时:{get_interval(t1)}毫秒')
     return msg
 
+
 def download_lives(live_url: str):
     t1 = getTime()
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
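
Taken together, `download_new_version` now delegates the archive fetch to `file_downloads` and then unpacks it with `zipfile`. A condensed sketch of that flow under the diff's names (`fetch_and_unpack` is hypothetical, the extraction target is an assumption, and the real function also clears directories and copies files afterwards):

```python
import os
import zipfile

from utils.download_progress import file_downloads


def fetch_and_unpack(url: str, tmp_path: str) -> str:
    # download with backend progress logging (the new code path in this commit)
    file_downloads([{'url': url, 'name': 'dr_py.zip'}], tmp_path)
    download_path = os.path.join(tmp_path, 'dr_py.zip')
    # unpack the archive; extracting next to it is an assumed destination
    with zipfile.ZipFile(download_path, 'r') as zf:
        zf.extractall(tmp_path)
    return download_path
```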