Print the download progress bar in the backend (后端下载进度条打印)
This commit is contained in:
parent da1ecbabcd
commit ed9df94742
@@ -1,6 +1,7 @@
 ###### 2023/11/18
 - [X] Fix 腾云驾雾
 - [X] Add a manual-upgrade feature: upload the upgrade .zip by hand, then force an upgrade
+- [X] Online upgrade now prints download progress in the backend log; viewable on port 9001
 
 ###### 2023/10/04
 - [X] Research on RSA encryption/decryption; importing it as a JS module failed. Use the functions injected by the 蜜蜂 app shell instead
@@ -1 +1 @@
-3.9.48beta24
+3.9.48beta25
@@ -7,10 +7,11 @@
 
 import os
 import time
-import logging
+# import logging
 import requests
 from urllib.parse import unquote
 from contextlib import closing
+from log import logger
 
 chunkSize = 1024 * 1024
 loop = 5
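The script-style `logging` setup is dropped in favour of the project's shared logger (`log` here, `utils.log` in the other file). That module is not part of this diff; a minimal sketch of what such a module typically provides, where the logger name, file name, and sizes are assumptions:

# log.py — hypothetical sketch; the project's real module is not shown in this diff
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger('drpy')  # assumed logger name
logger.setLevel(logging.INFO)

if not logger.handlers:  # guard against duplicate handlers on re-import
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler = RotatingFileHandler('app.log', maxBytes=5 * 1024 * 1024,
                                       backupCount=3, encoding='utf-8')
    file_handler.setFormatter(fmt)
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(file_handler)
    logger.addHandler(console)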
@@ -27,7 +28,8 @@ def speed_handle(process, file_length):
                                                        '□' * round((1 - num) * 20))
     else:
         progress = ' \033[1;33m{}\033[0m% |{}|'.format(100, '■' * 50)
-    print(progress, flush=True, end='')
+    # print(progress, flush=True, end='')
+    logger.info(progress)
 
 
 def get_file_name(url, headers):
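`speed_handle` maps a done/total ratio onto a fixed-width bar of '■' and '□' cells wrapped in ANSI colour codes, and now emits it through the logger so it shows up in the backend log viewable on port 9001. Only fragments of the function appear in this hunk; a self-contained sketch of the same rendering idea, where the bar width and colours are assumptions:

# Standalone sketch of the bar rendering; width and colours are assumptions.
def render_bar(done, total, width=20):
    # Clamp the ratio so a mis-reported content-length cannot overflow the bar.
    num = min(max(done / total, 0.0), 1.0) if total else 1.0
    filled = round(num * width)
    # \033[1;33m...\033[0m renders the percentage in bold yellow, like the
    # format string in the hunk above.
    return ' \033[1;33m{:.0f}\033[0m% |{}{}|'.format(num * 100,
                                                     '■' * filled,
                                                     '□' * (width - filled))

# render_bar(3, 4) -> ' 75% |■■■■■■■■■■■■■■■□□□□□|' (percentage wrapped in ANSI codes)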
@@ -47,8 +49,11 @@ def get_file_name(url, headers):
 
 
 def file_download(fileUrl, filePath):
-    response = requests.get(fileUrl, headers=headers, stream=True)
+    if os.path.exists(filePath):
+        os.remove(filePath)
+    response = requests.get(fileUrl, headers=headers, stream=True, verify=False)
     fileSize = int(response.headers['content-length'])  # file size in bytes
+    logger.info(f'fileSize:{fileSize}')
 
     tmpSize = 0
     n = 0
@@ -71,24 +76,34 @@ def file_download(fileUrl, filePath):
 
         if remainSize > 0:
 
-            with closing(requests.get(fileUrl, headers=_headers, stream=True)) as _response, open(filePath,
-                                                                                                  "ab") as file:
+            with closing(requests.get(fileUrl, headers=_headers, stream=True, verify=False)) as _response, open(
+                    filePath,
+                    "ab") as file:
                 for content in _response.iter_content(chunk_size=chunkSize):
                     file.write(content)
                     timeTook = time.perf_counter() - st
                     contentSize += len(content) / chunkSize
-                    print('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename), flush=True, end='')
+                    # print('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename), flush=True, end='')
+                    # logger.info('\r{}/{}: {}'.format(cnt + 1, len(fileUrls), filename))
+                    logger.info(f'文件{filename}下载中...')
 
                     speed_handle(contentSize + tmpSize / chunkSize, fileSize / chunkSize)
                     downloadSpeed = contentSize / timeTook  # average download speed
                     remainingTime = int(timeTook / (contentSize / remainSize) - timeTook)  # estimated time remaining
 
-                    print(
+                    # print(
+                    #     '[' + 'average speed: \033[1;31m{:.2f}MiB/s\033[0m, remaining time: \033[1;32m{}s\033[0m, file size: \033[1;34m{:.2f}MiB\033[0m'.format(
+                    #         downloadSpeed,
+                    #         remainingTime,
+                    #         fileSize / chunkSize) + ']', flush=True, end=' '
+                    # )
+
+                    logger.info(
                         '[' + 'average speed: \033[1;31m{:.2f}MiB/s\033[0m, remaining time: \033[1;32m{}s\033[0m, file size: \033[1;34m{:.2f}MiB\033[0m'.format(
                             downloadSpeed,
                             remainingTime,
-                            fileSize / chunkSize) + ']', flush=True, end=' '
+                            fileSize / chunkSize) + ']'
                     )
         else:
            isDownloaded = True
            break
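The retry loop enclosing this hunk sits mostly outside it: `tmpSize` is the byte count already on disk, `remainSize` is what is still missing, and `_headers` is presumably the shared `headers` plus an HTTP `Range` field so only the missing tail is re-requested. A condensed sketch of that resume pattern, under those assumptions:

# Sketch of HTTP Range-based resume, assuming the server honours Range (206 Partial Content).
import os
import requests
from contextlib import closing

def resume_download(url, path, headers=None, chunk=1024 * 1024):
    done = os.path.getsize(path) if os.path.exists(path) else 0
    # A HEAD request may not always report content-length; this is a sketch.
    total = int(requests.head(url, headers=headers).headers.get('content-length', 0))
    if done >= total > 0:
        return True
    _headers = dict(headers or {})
    _headers['Range'] = f'bytes={done}-'  # ask only for the missing tail
    with closing(requests.get(url, headers=_headers, stream=True)) as r, open(path, 'ab') as f:
        for content in r.iter_content(chunk_size=chunk):
            f.write(content)
    return os.path.getsize(path) >= total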
@@ -98,49 +113,62 @@ def file_download(fileUrl, filePath):
     return isDownloaded
 
 
-if __name__ == '__main__':
-
-    urlTxt = './url.txt'
-
-    pathSave = '/data2/sam_down'
-    os.makedirs(pathSave, exist_ok=True)
-
-    logging.basicConfig(level=logging.INFO, filename='downloading.log', filemode='a', format="%(message)s")
+def file_downloads(files, save_path='download'):
+    """
+    files = [{'url':'https://ghproxy.liuzhicong.com/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip','name':'dr_py.zip'}]
+    :param save_path:
+    :param files:
+    :return:
+    """
+    # save_path = 'tmp'
+    os.makedirs(save_path, exist_ok=True)
+    # logging.basicConfig(level=logging.INFO, filename='download/downloading.log', filemode='a', format="%(message)s")
+
     localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    logging.info(localtime + ': Start downloading task: {}'.format(urlTxt))
+    logger.info(localtime + ': Start downloading task: {}'.format(files))
     failedUrl = []
 
-    with open(urlTxt, "r") as f:
-        fileUrls = [line.strip() for line in f.readlines()]
-
-    for cnt, fileUrl in enumerate(fileUrls):
-
-        filename = get_file_name(fileUrl, headers)  # resolve the file name
-
+    for cnt, file in enumerate(files):
+        fileUrl = file.get('url')
+        if not fileUrl:
+            print('file error:no url')
+            continue
+        fileName = file.get('name')
+        filename = fileName or get_file_name(fileUrl, headers)  # resolve the file name
+        logger.info(f'开始下载{filename}: {fileUrl}')
         try:
             t0 = time.perf_counter()
-            isDload = file_download(fileUrl, os.path.join(pathSave, filename))
+            isDload = file_download(fileUrl, os.path.join(save_path, filename))
             t1 = time.perf_counter()
             localtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
 
             if isDload:
-                logging.info(
+                logger.info(
                     localtime + ': {} download successfully! Time consuming: {:.3f}s'.format(filename, t1 - t0))
             else:
-                # os.remove(os.path.join(pathSave, filename))
-                logging.info(localtime + ': {} download failed! Url: {}'.format(filename, fileUrl))
+                logger.info(localtime + ': {} download failed! Url: {}'.format(filename, fileUrl))
                 failedUrl.append(fileUrl)
 
         except:
             failedUrl.append(fileUrl)
 
     if len(failedUrl):
-
-        with open('failedUrl.txt', 'w') as p:
+        with open(os.path.join(save_path, 'failedUrl.txt'), 'w') as p:
             for url in failedUrl:
                 p.write(url + '\n')
 
         fn = len(failedUrl)
-        sn = len(fileUrls) - fn
-        print(
-            "\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+        sn = len(files) - fn
+        # print("\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+        logger.info(
+            "\n{} file{} download successfully, {} file{} download failed!".format(sn, 's' * (sn > 1), fn, 's' * (fn > 1)))
+
+
+if __name__ == '__main__':
+    # urlTxt = 'download/urls.txt'
+    # with open(urlTxt, "r") as f:
+    #     fileUrls = [line.strip() for line in f.readlines()]
+
+    files = [{'url': 'https://ghproxy.liuzhicong.com/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip',
+              'name': 'dr_py.zip'}]
+    file_downloads(files, 'tmp')
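URLs that still fail after all retries end up in `failedUrl.txt` under `save_path`, so a later pass can feed them back in. A possible caller-side helper along those lines (not part of this commit):

# Hypothetical helper: re-queue the recorded failures for another pass.
import os

def retry_failed(save_path='download'):
    failed = os.path.join(save_path, 'failedUrl.txt')
    if not os.path.exists(failed):
        return
    with open(failed) as f:
        urls = [line.strip() for line in f if line.strip()]
    # Entries without a 'name' fall back to get_file_name() inside file_downloads.
    file_downloads([{'url': u} for u in urls], save_path)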
@@ -9,66 +9,75 @@ import sys
 import requests
 import os
 import zipfile
 import shutil  # https://blog.csdn.net/weixin_33130113/article/details/112336581
 from utils.log import logger
+from utils.download_progress import file_downloads
 from utils.web import get_interval
 from utils.htmlParser import jsoup
 import ujson
 
 headers = {
     'Referer': 'https://gitcode.net/',
     'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
 }
 
-def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg',size=0):
+
+def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg', size=0):
     jsp = jsoup(url)
     pdfh = jsp.pdfh
     pdfa = jsp.pdfa
     pd = jsp.pd
     try:
-        r = requests.get(url,headers=headers,timeout=2)
+        r = requests.get(url, headers=headers, timeout=2)
         html = r.text
-        data = pdfa(html,'ul.hot-list&&li')
-        suggs = [{'title':pdfh(dt,'a&&Text'),'url':pd(dt,'a&&href')} for dt in data]
+        data = pdfa(html, 'ul.hot-list&&li')
+        suggs = [{'title': pdfh(dt, 'a&&Text'), 'url': pd(dt, 'a&&href')} for dt in data]
         # print(html)
         # print(suggs)
         return suggs
     except:
         return []
 
-def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',size=0):
+
+def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp',
+                   size=0):
     size = int(size) if size else 50
-    pdata = ujson.dumps({"pageNum":0,"pageSize":size})
+    pdata = ujson.dumps({"pageNum": 0, "pageSize": size})
     try:
-        r = requests.post(url,headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36', 'content-type': 'application/json'},data=pdata,timeout=2)
+        r = requests.post(url, headers={
+            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
+            'content-type': 'application/json'}, data=pdata, timeout=2)
         html = r.json()
         # print(html)
         data = html['data']['navItemList'][0]['hotRankResult']['rankItemList']
-        suggs = [{'title':dt['title'],'url':dt['url']} for dt in data]
+        suggs = [{'title': dt['title'], 'url': dt['url']} for dt in data]
         # print(html)
         # print(suggs)
         return suggs
     except:
         return []
 
-def getHotSuggest(s_from,size):
+
+def getHotSuggest(s_from, size):
     if s_from == 'sougou':
         return getHotSuggest1(size=size)
     else:
         return getHotSuggest2(size=size)
 
 
 def getLocalVer():
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
     version_path = os.path.join(base_path, f'js/version.txt')
     if not os.path.exists(version_path):
-        with open(version_path,mode='w+',encoding='utf-8') as f:
+        with open(version_path, mode='w+', encoding='utf-8') as f:
             version = '1.0.0'
             f.write(version)
     else:
-        with open(version_path,encoding='utf-8') as f:
+        with open(version_path, encoding='utf-8') as f:
             version = f.read()
     return version
 
 
 def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
     ver = '1.0.1'
     msg = ''
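Both providers normalise to the same shape, a list of {'title', 'url'} dicts, so callers pick a source purely through `s_from`; for example:

suggs = getHotSuggest('sougou', 10)  # Sogou hot suggestions
# any other value falls through to the Tencent hot-rank endpoint:
# suggs = getHotSuggest('qq', 10)
for s in suggs[:3]:
    print(s['title'], s['url'])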
@@ -79,17 +88,18 @@ def getOnlineVer(update_proxy='https://ghproxy.liuzhicong.com/'):
         # r = requests.get('https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/raw/master/js/version.txt',timeout=(2,2))
         url = f'{update_proxy}https://raw.githubusercontent.com/hjdhnx/dr_py/main/js/version.txt'
         logger.info(f'开始检查线上版本号:{url}')
-        r = requests.get(url,headers=headers,timeout=(2,2),verify=False)
+        r = requests.get(url, headers=headers, timeout=(2, 2), verify=False)
         ver = r.text
     except Exception as e:
         # print(f'{e}')
         msg = f'{e}'
         logger.info(msg)
-    return ver,msg
+    return ver, msg
 
+
 def checkUpdate():
     local_ver = getLocalVer()
-    online_ver,msg = getOnlineVer()
+    online_ver, msg = getOnlineVer()
     if local_ver != online_ver:
         return True
     return False
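`checkUpdate` is a plain inequality between the two version strings, not an ordering test, so any mismatch (even a locally newer build) reports an update. The usual caller-side flow, sketched from the functions in this file:

if checkUpdate():                 # e.g. '3.9.48beta24' != '3.9.48beta25' -> True
    msg = download_new_version()  # fetch the zip with progress logging, unpack, copy over
    logger.info(msg)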
@@ -112,6 +122,7 @@ def del_file(filepath):
     except Exception as e:
         logger.info(f'删除{file_path}发生错误:{e}')
 
+
 def copytree(src, dst, ignore=None):
     if ignore is None:
         ignore = []
@@ -123,11 +134,11 @@
         if os.path.isdir(from_dir):  # is this a directory?
             if not os.path.exists(to_dir):  # create the target directory if it does not exist
                 os.mkdir(to_dir)
-            copytree(from_dir, to_dir,ignore)  # recurse into subdirectories and copy their files
+            copytree(from_dir, to_dir, ignore)  # recurse into subdirectories and copy their files
         elif os.path.isfile(from_dir):  # a plain file: copy it directly
             if ignore:
-                regxp = '|'.join(ignore).replace('\\','/')  # assemble the regex
-                to_dir_str = str(to_dir).replace('\\','/')
+                regxp = '|'.join(ignore).replace('\\', '/')  # assemble the regex
+                to_dir_str = str(to_dir).replace('\\', '/')
                 if not re.search(rf'{regxp}', to_dir_str, re.M):
                     shutil.copy(from_dir, to_dir)  # copy the file
         else:
@@ -140,19 +151,20 @@ def force_copy_files(from_path, to_path, exclude_files=None):
         exclude_files = []
     logger.info(f'开始拷贝文件{from_path}=>{to_path}')
     if not os.path.exists(to_path):
-        os.makedirs(to_path,exist_ok=True)
+        os.makedirs(to_path, exist_ok=True)
     try:
         if sys.version_info < (3, 8):
-            copytree(from_path, to_path,exclude_files)
+            copytree(from_path, to_path, exclude_files)
         else:
             if len(exclude_files) > 0:
-                shutil.copytree(from_path, to_path, dirs_exist_ok=True,ignore=shutil.ignore_patterns(*exclude_files))
+                shutil.copytree(from_path, to_path, dirs_exist_ok=True, ignore=shutil.ignore_patterns(*exclude_files))
             else:
                 shutil.copytree(from_path, to_path, dirs_exist_ok=True)
+
     except Exception as e:
         logger.info(f'拷贝文件{from_path}=>{to_path}发生错误:{e}')
 
 
 def copy_to_update():
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
     tmp_path = os.path.join(base_path, f'tmp')
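Note that the two branches interpret `exclude_files` differently: the pre-3.8 `copytree` fallback joins the entries into a regex and tests the full destination path, while `shutil.ignore_patterns` glob-matches bare names within each directory, so a path-qualified entry such as 'txt/pycms0.json' can only ever match in the regex branch. A quick illustration:

import shutil

ignore = shutil.ignore_patterns('txt/pycms0.json', 'rules.db')
# ignore_patterns fnmatch-es each pattern against names inside one directory:
print(ignore('txt', ['pycms0.json', 'pycms1.json']))  # set() — the path-qualified pattern never matches a bare name
print(ignore('base', ['rules.db']))                   # {'rules.db'} — bare names do match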
@@ -173,10 +185,10 @@ def copy_to_update():
         logger.info(f'升级过程中清理jsd文件发生错误:{e}')
 
     # never overwrite super or base
-    paths = ['js','models','controllers','libs','static','templates','utils','txt','jiexi','py','whl','doc']
-    exclude_files = ['txt/pycms0.json','txt/pycms1.json','txt/pycms2.json','base/rules.db']
+    paths = ['js', 'models', 'controllers', 'libs', 'static', 'templates', 'utils', 'txt', 'jiexi', 'py', 'whl', 'doc']
+    exclude_files = ['txt/pycms0.json', 'txt/pycms1.json', 'txt/pycms2.json', 'base/rules.db']
     for path in paths:
-        force_copy_files(os.path.join(dr_path, path), os.path.join(base_path, path),exclude_files)
+        force_copy_files(os.path.join(dr_path, path), os.path.join(base_path, path), exclude_files)
     try:
         shutil.copy(os.path.join(dr_path, 'app.py'), os.path.join(base_path, 'app.py'))  # copy the file
         shutil.copy(os.path.join(dr_path, 'requirements.txt'), os.path.join(base_path, 'requirements.txt'))  # copy the file
@@ -185,13 +197,14 @@ def copy_to_update():
     logger.info(f'升级程序执行完毕,全部文件已拷贝覆盖')
     return True
 
+
 def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     update_proxy = (update_proxy or '').strip()
     logger.info(f'update_proxy:{update_proxy}')
     t1 = getTime()
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
     tmp_path = os.path.join(base_path, f'tmp')
-    os.makedirs(tmp_path,exist_ok=True)
+    os.makedirs(tmp_path, exist_ok=True)
     # url = 'https://gitcode.net/qq_32394351/dr_py/-/archive/master/dr_py-master.zip'
     # url = 'https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/archive/master.zip'
     url = f'{update_proxy}https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip'
@@ -203,13 +216,18 @@ def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     try:
         # print(f'开始下载:{url}')
         logger.info(f'开始下载:{url}')
-        r = requests.get(url,headers=headers,timeout=(20,20),verify=False)
-        rb = r.content
         download_path = os.path.join(tmp_path, 'dr_py.zip')
-        # empty the directory before saving the file
-        del_file(tmp_path)
-        with open(download_path,mode='wb+') as f:
-            f.write(rb)
+        # r = requests.get(url, headers=headers, timeout=(20, 20), verify=False)
+        # rb = r.content
+        # # empty the directory before saving the file
+        # del_file(tmp_path)
+        # with open(download_path,mode='wb+') as f:
+        #     f.write(rb)
+
+        # 2023/11/18: switched to a download that logs its progress
+        file_downloads([{'url': url, 'name': 'dr_py.zip'}], tmp_path)
+
         # print(f'开始解压文件:{download_path}')
         logger.info(f'开始解压文件:{download_path}')
         f = zipfile.ZipFile(download_path, 'r')  # location of the zip archive
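The extraction step that follows this hunk is outside the diff; with `zipfile` it would typically look like the sketch below. A GitHub archive of refs/heads/main unpacks to a top-level dr_py-main folder, and the variable names here are illustrative:

# Sketch of the unpack step, assuming the names used elsewhere in this file.
import os
import zipfile

with zipfile.ZipFile(download_path, 'r') as zf:
    zf.extractall(tmp_path)  # unpack next to the downloaded zip
dr_path = os.path.join(tmp_path, 'dr_py-main')  # top-level folder inside GitHub's main-branch archive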
@@ -227,22 +245,23 @@ def download_new_version(update_proxy='https://ghproxy.liuzhicong.com/'):
     logger.info(f'系统升级共计耗时:{get_interval(t1)}毫秒')
     return msg
 
-def download_lives(live_url:str):
+
+def download_lives(live_url: str):
     t1 = getTime()
     base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
     live_path = os.path.join(base_path, f'base/直播.txt')
     logger.info(f'尝试同步{live_url}远程内容到{live_path}')
     try:
-        r = requests.get(live_url,headers=headers,timeout=3)
+        r = requests.get(live_url, headers=headers, timeout=3)
         auto_encoding = r.apparent_encoding
-        if auto_encoding.lower() in ['utf-8','gbk','bg2312','gb18030']:
+        if auto_encoding.lower() in ['utf-8', 'gbk', 'bg2312', 'gb18030']:
             r.encoding = auto_encoding
         # print(r.encoding)
         html = r.text
         # print(len(html))
-        if re.search('cctv|.m3u8',html,re.M|re.I) and len(html) > 1000:
+        if re.search('cctv|.m3u8', html, re.M | re.I) and len(html) > 1000:
             logger.info(f'直播源同步成功,耗时{get_interval(t1)}毫秒')
-            with open(live_path,mode='w+',encoding='utf-8') as f:
+            with open(live_path, mode='w+', encoding='utf-8') as f:
                 f.write(html)
             return True
         else:
@@ -250,4 +269,4 @@ def download_lives(live_url:str):
             return False
     except Exception as e:
         logger.info(f'直播源同步失败,耗时{get_interval(t1)}毫秒\n{e}')
         return False