diff --git a/tools/generate_html/daily_dev.json b/tools/generate_html/daily_dev.json
index ee54f887d6acf1ceef67a44c5cb6a49495231489..dc9069832dacc0c67e8fdb0d76ce70ec72c20ae5 100644
--- a/tools/generate_html/daily_dev.json
+++ b/tools/generate_html/daily_dev.json
@@ -1,20 +1,6 @@
[
{
"id" : 1 ,
- "name" : "lite",
- "branch" : "br_base",
- "html_version" : "br_base",
- "whl_path" : "/mindspore/website/br_base/lite/centos_x86/cloud_fusion/python39",
- "whl_name" : "mindspore_lite-.*-cp39-cp39-linux_x86_64.whl$",
- "environ" : "MS_PATH",
- "uninstall_name" : "mindspore_lite",
- "tar_path" : "/mindspore/website/br_base/lite/centos_x86/cloud_fusion/python39",
- "tar_name" : "mindspore-lite-.*-linux-x64.tar.gz$",
- "extra_whl_path" : "/mindspore/website/br_base/cpu/x86_64",
- "extra_whl_name" : "mindspore-.*-cp39-cp39-linux_x86_64.whl$"
- },
- {
- "id" : 2 ,
"name" : "mindspore",
"branch" : "br_base",
"html_version" : "br_base",
@@ -24,7 +10,7 @@
"uninstall_name" : "mindspore"
},
{
- "id" : 3 ,
+ "id" : 2 ,
"name" : "tutorials",
"branch" : "br_base",
"html_version" : "br_base",
diff --git a/tools/generate_html/replace_html_menu.py b/tools/generate_html/replace_html_menu.py
index 01cde4439e33323820e672e7329ed67ce019ff56..0a8fed2545302c38800feb2010efc447fe0c88ac 100644
--- a/tools/generate_html/replace_html_menu.py
+++ b/tools/generate_html/replace_html_menu.py
@@ -316,14 +316,10 @@ def replace_html_menu(html_path, hm_ds_path):
// let the scorer override scores with a custom scoring function"""
- old_searchtools_content1 = """docContent = htmlElement.find('[role=main]')[0];"""
- new_searchtools_content1 = """htmlElement.find('[role=main]').find('[itemprop=articleBody]').find('style').remove();
- docContent = htmlElement.find('[role=main]')[0];"""
with open(searchtools_path, 'r+', encoding='utf-8') as f:
searchtools_content = f.read()
new_content = searchtools_content.replace(old_searchtools_content, new_searchtools_content)
new_content = new_content.replace('linkUrl +', 'requestUrl +')
- new_content = new_content.replace(old_searchtools_content1, new_searchtools_content1)
if new_content != searchtools_content:
f.seek(0)
f.truncate()
diff --git a/tools/generate_html/run.py b/tools/generate_html/run.py
index a4547b24faf6639d23d004ace1b3734c355ee0d2..5cb712992f424644d8b006f04811b8fcab26fad3 100644
--- a/tools/generate_html/run.py
+++ b/tools/generate_html/run.py
@@ -21,11 +21,11 @@ from lxml import etree
from replace_html_menu import replace_html_menu, modify_menu_num
# Clone the repository
-def git_clone(repo_url, repo_dir):
+def git_clone(repo_url, repo_dir, repo_branch):
if not os.path.exists(repo_dir):
print("Cloning repo.....")
os.makedirs(repo_dir, exist_ok=True)
- Repo.clone_from(repo_url, repo_dir, branch="master")
+ Repo.clone_from(repo_url, repo_dir, branch=repo_branch, depth=1)
print("Cloning Repo Done.")
# Update the repository
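
git_clone now takes the branch to clone and performs a shallow clone. GitPython forwards unknown keyword arguments to the underlying git clone invocation, so depth=1 becomes --depth=1 and only the tip commit of the requested branch is fetched. A hedged usage sketch (repository URL and branch are taken from this patch; the destination path is illustrative):

    from git import Repo

    # Shallow-clone a single branch; extra kwargs are passed straight
    # through to `git clone`, so depth=1 turns into --depth=1.
    Repo.clone_from("https://gitee.com/mindspore/mindspore.git",
                    "/tmp/repos/mindspore", branch="br_base", depth=1)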
@@ -113,12 +113,12 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
# Read the JSON data file
if version == "daily" or os.path.exists(os.path.join(os.path.dirname(__file__), "daily_dev.json")):
flag_dev = 1
- with open(os.path.join(os.path.dirname(__file__), "daily_dev.json"), 'r+', encoding='utf-8') as f:
- data = json.load(f)
+ with open(os.path.join(os.path.dirname(__file__), "daily_dev.json"), 'r+', encoding='utf-8') as g:
+ data = json.load(g)
else:
flag_dev = 0
- with open(os.path.join(os.path.dirname(__file__), "version.json"), 'r+', encoding='utf-8') as f:
- data = json.load(f)
+ with open(os.path.join(os.path.dirname(__file__), "version.json"), 'r+', encoding='utf-8') as g:
+ data = json.load(g)
with open(os.path.join(os.path.dirname(__file__), "base_version.json"), 'r+', encoding='utf-8') as g:
data_b = json.load(g)
@@ -148,6 +148,9 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
if data[i]['environ'] == "MS_PATH":
repo_url = "https://gitee.com/mindspore/mindspore.git"
repo_path = f"{REPODIR}/mindspore"
+ elif data[i]['environ'] == "MSL_PATH":
+ repo_url = "https://gitee.com/mindspore/mindspore-lite.git"
+ repo_path = f"{REPODIR}/mindspore-lite"
elif data[i]['environ'] == "MSC_PATH":
repo_url = "https://gitee.com/mindspore/mindscience.git"
repo_path = f"{REPODIR}/mindscience"
@@ -167,10 +170,10 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
if data[i]['environ'] and branch_:
os.environ[data[i]['environ']] = repo_path
try:
- status_code = requests.get(repo_url, headers=headers).status_code
+ status_code = requests.get(repo_url, headers=headers, timeout=30).status_code
if status_code == 200:
if not os.path.exists(repo_path):
- git_clone(repo_url, repo_path)
+ git_clone(repo_url, repo_path, branch_)
if data[i]['environ'] == "MSC_PATH":
if data[i]['name'] == "mindscience":
git_update(repo_path, branch_)
@@ -237,7 +240,8 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
href = link_.get("href", "")
if re.findall(name, title) and not os.path.exists(os.path.join(WHLDIR, title)):
download_url = url+href
- downloaded = requests.get(download_url, stream=True, auth=(user, pd), verify=False)
+ downloaded = requests.get(download_url, stream=True, auth=(user, pd),
+ verify=False, timeout=30)
with open(title, 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
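
Every requests.get call gains timeout=30, so a stalled connection or read now raises requests.exceptions.Timeout instead of hanging the build indefinitely; note the timeout bounds connect/read inactivity, not total download time. A hedged sketch of how callers could react to the new exception (the retry wrapper is hypothetical, not part of this patch):

    import requests

    def fetch(url, auth, retries=3):
        # Hypothetical retry wrapper around the patched call: timeout=30
        # raises requests.exceptions.Timeout on a stalled connect or read.
        for attempt in range(retries):
            try:
                return requests.get(url, stream=True, auth=auth,
                                    verify=False, timeout=30)
            except requests.exceptions.Timeout:
                if attempt == retries - 1:
                    raise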
@@ -261,7 +265,8 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
href = link_.get("href", "")
if re.findall(name, title) and not os.path.exists(os.path.join(WHLDIR, title)):
download_url = url+href
- downloaded = requests.get(download_url, stream=True, auth=(user, pd), verify=False)
+ downloaded = requests.get(download_url, stream=True, auth=(user, pd),
+ verify=False, timeout=30)
with open(title, 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
@@ -286,7 +291,8 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
href = link_.get("href", "")
if re.findall(name, title) and not os.path.exists(os.path.join(WHLDIR, title)):
download_url = url+href
- downloaded = requests.get(download_url, stream=True, auth=(user, pd), verify=False)
+ downloaded = requests.get(download_url, stream=True, auth=(user, pd),
+ verify=False, timeout=30)
with open(title, 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
@@ -311,7 +317,8 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
href = link_.get("href", "")
if re.findall(name, title):
download_url = url+href
- downloaded = requests.get(download_url, stream=True, auth=(user, pd), verify=False)
+ downloaded = requests.get(download_url, stream=True, auth=(user, pd),
+ verify=False, timeout=30)
with open(title, 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
@@ -323,7 +330,7 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
if data[i]['whl_path'] != "":
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
download_url = release_url + data[i]['whl_path'] + data[i]['whl_name']
- downloaded = requests.get(download_url, stream=True, verify=False)
+ downloaded = requests.get(download_url, stream=True, verify=False, timeout=30)
with open(data[i]['whl_name'], 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
@@ -333,7 +340,7 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
if 'extra_whl_path' in data[i] and data[i]['extra_whl_path'] != "":
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
download_url = release_url + data[i]['extra_whl_path'] + data[i]['extra_whl_name']
- downloaded = requests.get(download_url, stream=True, verify=False)
+ downloaded = requests.get(download_url, stream=True, verify=False, timeout=30)
with open(data[i]['extra_whl_name'], 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
@@ -344,7 +351,7 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
if data[i]['tar_path'] != '':
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
download_url = release_url + data[i]['tar_path'] + data[i]['tar_name']
- downloaded = requests.get(download_url, stream=True, verify=False)
+ downloaded = requests.get(download_url, stream=True, verify=False, timeout=30)
with open(data[i]['tar_name'], 'wb') as fd:
#shutil.copyfileobj(downloaded.raw, fd)
for chunk in downloaded.iter_content(chunk_size=512):
@@ -415,7 +422,8 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
os.chdir(os.path.join(DOCDIR, "../../", i))
else:
os.chdir(os.path.join(DOCDIR, "../../docs", i))
- subprocess.run(["pip", "install", "-r", "requirements.txt"])
+ install_req_cmd = ["pip", "install", "-r", "requirements.txt"]
+ subprocess.run(install_req_cmd)
try:
if replace_flag:
@@ -449,6 +457,7 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
pass
# Build the English output
+ mk_clean_cmd = ["make", "clean"]
if os.path.exists("source_en"):
try:
print(f"当前输出-{i}- 的-英文-版本---->")
@@ -460,7 +469,7 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
f.truncate()
f.write(content_mod)
- subprocess.run(["make", "clean"])
+ subprocess.run(mk_clean_cmd)
cmd_make = ["make", "html"]
process = subprocess.Popen(cmd_make, stderr=subprocess.PIPE, encoding="utf-8")
_, stderr = process.communicate()
@@ -479,12 +488,10 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
else:
if i == "mindspore":
TARGET = f"{OUTPUTDIR}/docs/en/{ArraySource[i]}"
- os.makedirs(os.path.dirname(TARGET), exist_ok=True)
- shutil.copytree("build_en/html", TARGET)
else:
TARGET = f"{OUTPUTDIR}/{i}/en/{ArraySource[i]}"
- os.makedirs(os.path.dirname(TARGET), exist_ok=True)
- shutil.copytree("build_en/html", TARGET)
+ os.makedirs(os.path.dirname(TARGET), exist_ok=True)
+ shutil.copytree("build_en/html", TARGET)
# pylint: disable=W0702
except:
print(f"{i} 的 英文版本运行失败")
@@ -500,7 +507,7 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
f.seek(0)
f.truncate()
f.write(content_mod)
- subprocess.run(["make", "clean"])
+ subprocess.run(mk_clean_cmd)
cmd_make = ["make", "html"]
process = subprocess.Popen(cmd_make, stderr=subprocess.PIPE, encoding="utf-8")
_, stderr = process.communicate()
@@ -517,12 +524,10 @@ def main(version, user, pd, WGETDIR, release_url, generate_list):
else:
if i == "mindspore":
TARGET = f"{OUTPUTDIR}/docs/zh-CN/{ArraySource[i]}"
- os.makedirs(os.path.dirname(TARGET), exist_ok=True)
- shutil.copytree("build_zh_cn/html", TARGET)
else:
TARGET = f"{OUTPUTDIR}/{i}/zh-CN/{ArraySource[i]}"
- os.makedirs(os.path.dirname(TARGET), exist_ok=True)
- shutil.copytree("build_zh_cn/html", TARGET)
+ os.makedirs(os.path.dirname(TARGET), exist_ok=True)
+ shutil.copytree("build_zh_cn/html", TARGET)
# pylint: disable=W0702
except:
print(f"{i} 的 中文版本运行失败")
@@ -665,11 +670,15 @@ if __name__ == "__main__":
if os.path.exists(os.path.join(output_path, f_name)):
os.remove(os.path.join(output_path, f_name))
shutil.copy(os.path.join(theme_path, f_name), os.path.join(output_path, f_name))
+ old_searchtools_content = """docContent = htmlElement.find('[role=main]')[0];"""
+ new_searchtools_content = """htmlElement.find('[role=main]').find('[itemprop=articleBody]').find('style').remove();
+ docContent = htmlElement.find('[role=main]')[0];"""
# pylint: disable=W0621
for lg in ['en', 'zh-CN']:
# pylint: disable=W0621
for out_name in theme_list:
try:
+ static_path_searchtools = glob.glob(f"{output_path}/{out_name}/{lg}/*/_static/searchtools.js")[0]
static_path_css = glob.glob(f"{output_path}/{out_name}/{lg}/*/_static/css/theme.css")[0]
static_path_js = glob.glob(f"{output_path}/{out_name}/{lg}/*/_static/js/theme.js")[0]
static_path_jquery = glob.glob(f"{output_path}/{out_name}/{lg}/*/_static/jquery.js")[0]
@@ -734,6 +743,17 @@ if __name__ == "__main__":
os.remove(static_path_js_html5p)
if os.path.exists(static_path_js_html5):
os.remove(static_path_js_html5)
+ # Strip redundant inline styles from the search results page
+ if os.path.exists(static_path_searchtools):
+ with open(static_path_searchtools, 'r+', encoding='utf-8') as k:
+ searchtools_content = k.read()
+ if new_searchtools_content not in searchtools_content:
+ new_content_s = searchtools_content.replace(old_searchtools_content,
+ new_searchtools_content)
+ if new_content_s != searchtools_content:
+ k.seek(0)
+ k.truncate()
+ k.write(new_content_s)
# pylint: disable=W0702
# pylint: disable=W0703
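
The searchtools.js tweak removed from replace_html_menu.py above is reinstated here as a post-processing pass over every generated _static/searchtools.js. The `new_searchtools_content not in searchtools_content` guard makes the rewrite idempotent, so the script can be re-run over the same output tree without injecting the style-stripping line twice. A minimal standalone sketch of that idempotent in-place replace (the helper name is hypothetical):

    def patch_file(path, old, new):
        # Idempotent in-place replace: skip already-patched files and
        # rewrite only when the replacement actually changed something.
        with open(path, 'r+', encoding='utf-8') as f:
            content = f.read()
            if new in content:
                return False          # already patched
            patched = content.replace(old, new)
            if patched == content:
                return False          # anchor string not found
            f.seek(0)
            f.truncate()
            f.write(patched)
            return True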