Analyze the font file
Refreshing the page a few times shows that the font keeps changing.
Find the mapping between the fonts so the obfuscated characters can be replaced automatically.
A font file stores its characters in tables. The cmap table records the mapping between unicode code points and glyph names; glyf is the glyph table, which stores the outline (stroke) data of each character;
the loca table records where each glyph sits inside the glyf table, so looking up a character's glyph goes through loca.
By matching the glyph outlines in glyf we can recover the relationship between unicode code points and the real characters.
Reference: http://www.javashuo.com/article/p-nfmrkysh-by.html
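To make the table structure concrete, the woff can be opened with fontTools and its tables inspected. A minimal sketch, assuming a copy of the site's font has already been saved locally as base.woff (the file name is just a placeholder; the actual download code comes later):

from fontTools.ttLib import TTFont

font = TTFont('base.woff')
print(font.getGlyphOrder())           # glyph names; the code below assumes the first two entries are not digits
print(font.getBestCmap())             # cmap: {unicode code point: glyph name}
name = font.getGlyphOrder()[2]        # pick one of the obfuscated glyph names
glyph = font['glyf'][name]            # glyf: outline data; loca offsets are resolved by fontTools internally
print(name, glyph.numberOfContours)   # the outline (contours and points) is what identifies the digit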
Outline of the approach:
1. Download one font as the baseline and build the mapping between the baseline font's unicode code points and the real characters.
2. Record the font obtained after a refresh as site font 2, then match the glyphs of site font 1 (the baseline) and site font 2 to link the two sets of unicode code points (see the sketch after this list).
3. Matched glyphs draw the same character, which ties the new font's unicode code points back to the baseline mapping; finally replace each unicode reference in the page with the real character.
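A minimal sketch of step 2, matching glyphs across two fonts by their outline data; base.woff and new.woff are placeholder file names, and the real download code follows below:

from fontTools.ttLib import TTFont

def outline(font, name):
    # Return a comparable snapshot of one glyph's outline (points, contour ends, flags).
    glyf = font['glyf']
    coords, end_pts, flags = glyf[name].getCoordinates(glyf)
    return list(coords), list(end_pts), list(flags)

base = TTFont('base.woff')
new = TTFont('new.woff')
# Skip the first two glyphs, which carry no digit (same assumption as the code below).
base_outlines = {b: outline(base, b) for b in base.getGlyphOrder()[2:]}
name_map = {}                                   # new glyph name -> baseline glyph name
for n in new.getGlyphOrder()[2:]:
    target = outline(new, n)
    for b, snapshot in base_outlines.items():
        if snapshot == target:
            name_map[n] = b
print(name_map)

Two glyphs that draw the same digit have identical outlines even though their glyph names and code points differ between refreshes, which is exactly the link step 2 needs.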
headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"} r=requests.get("https://maoyan.com/board/1",headers=headers) font1_url="http:"+re.findall("url\(\'(\/\/.*?woff)\'\)",r.text,re.M)[0]
# Create a font directory and save the baseline font (only done on the first run).
if not os.path.exists("font"):
    font1 = requests.get(font1_url, headers=headers)
    os.mkdir("font")
    with open("./font/base.woff", "wb") as f:
        f.write(font1.content)
# Build the baseline mapping by hand: for each glyph in base.woff, type the digit it shows
# (check the glyph shapes in a font editor).
base_font = TTFont('./font/base.woff')
base_dict = []
for i in range(len(base_font.getGlyphOrder()[2:])):
    print(f"Digit for glyph {i + 1}:")
    w = input()
    base_dict.append({"code": base_font.getGlyphOrder()[2:][i], "num": w})
# Download the font used by the current page (site font 2).
new_font_url = "http:" + re.findall(r"url\('(//.*?woff)'\)", r.text, re.M)[0]
font = requests.get(new_font_url, headers=headers)
with open("new_font.woff", "wb") as f:
    f.write(font.content)
new_font = TTFont('new_font.woff')
new_font_code_list = new_font.getGlyphOrder()[2:]
# Match the two fonts' glyphs to map the new font's unicode references to real digits.
replace_dic = []
for i in range(10):
    news = new_font['glyf'][new_font_code_list[i]]
    for j in range(10):
        bases = base_font['glyf'][base_dict[j]["code"]]
        if news == bases:
            unicode = new_font_code_list[i].lower().replace("uni", "&#x") + ";"
            num = base_dict[j]["num"]
            replace_dic.append({"code": unicode, "num": num})
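The if news == bases check compares the two Glyph objects directly; comparing explicit outline snapshots, as in the sketch after the step list, does the same match in a more transparent way.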
# Replace the obfuscated HTML entities in the page source with the real digits.
new_data = r.text
for i in range(len(replace_dic)):
    new_data = new_data.replace(replace_dic[i]["code"], replace_dic[i]["num"])
# Parse the de-obfuscated page and extract the box-office fields.
tree = etree.HTML(new_data)
dds = tree.xpath('//dl[@class="board-wrapper"]/dd')
info = []
for dd in dds:
    title = dd.xpath('.//p[@class="name"]/a/@title')[0]
    star = dd.xpath('.//p[@class="star"]/text()')[0].replace("主演:", "")
    time = dd.xpath('.//p[@class="releasetime"]/text()')[0].replace("上映時間:", "")
    realticket = dd.xpath('.//p[@class="realtime"]//text()')[1] + dd.xpath('.//p[@class="realtime"]//text()')[2].strip()
    totalticket = dd.xpath('.//p[@class="total-boxoffice"]//text()')[1] + dd.xpath('.//p[@class="total-boxoffice"]//text()')[2].strip()
    info.append({"標題": title, "主演": star, "上映時間": time, "實時票房": realticket, "總票房": totalticket})
# Write the results to a CSV file.
import csv

csv_file = open("1325.csv", 'w', newline='')
writer = csv.writer(csv_file)
keys = info[0].keys()
writer.writerow(keys)
for dic in info:
    for key in keys:
        if key not in dic:
            dic[key] = ''
    writer.writerow(dic.values())
csv_file.close()