Many of us have run into good tutorials and learning materials online that have no e-book version, and itched for a downloadable copy. The following walks you through converting this kind of web tutorial into a PDF e-book.
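Before running any of the examples, you need two things: the pdfkit package itself (installable with pip install pdfkit) and the wkhtmltopdf binary that pdfkit drives under the hood, which is a separate install available from https://wkhtmltopdf.org/ or your system's package manager.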
A simple example:
import pdfkit

pdfkit.from_url('http://google.com', 'out.pdf')
pdfkit.from_file('test.html', 'out.pdf')
pdfkit.from_string('Hello!', 'out.pdf')
You can also pass a list of URLs or file names:
pdfkit.from_url(['google.com', 'yandex.ru', 'engadget.com'], 'out.pdf')
pdfkit.from_file(['file1.html', 'file2.html'], 'out.pdf')
You can also pass an open file object:
with open('file.html') as f:
    pdfkit.from_file(f, 'out.pdf')
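When you pass a list like this, wkhtmltopdf renders the inputs in order and concatenates them into a single PDF, which is exactly the behaviour the tutorial-to-e-book script below relies on.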
For example, to turn the Django tutorial on ziqiangxuetang.com (自強學堂) into a single PDF file:
# coding=utf-8
from __future__ import unicode_literals
import os, sys, re, time
import requests, codecs
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import pdfkit
import platform

requests.packages.urllib3.disable_warnings()

system = platform.system()
print(sys.getdefaultencoding())
str_encode = 'gbk' if system == 'Windows' else 'utf-8'
print(str_encode)

html_template = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
</head>
<body>
{content}
</body>
</html>
"""

if not os.path.exists(os.path.join(os.path.dirname(__file__), 'html')):
    os.mkdir(os.path.join(os.path.dirname(__file__), 'html'))

url_list = []
start_url = 'http://www.ziqiangxuetang.com/django/django-tutorial.html'
# s = requests.session()
# html_doc = s.get('{}'.format(start_url), verify=False).content
# soup = BeautifulSoup(html_doc, 'html.parser')
# print(soup.prettify())


def get_url_list(url):
    """Collect the URL of every chapter linked from the table of contents."""
    last_position = find_last(url, "/") + 1
    tutorial_url_head = url[0:last_position]
    domain = get_domain(url) + "/"
    print(domain)
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")
    urls = []
    for a in soup.find_all("a"):
        href = str(a.get('href'))
        result = href.find('/')
        # Bare file names are resolved against the tutorial directory,
        # paths containing '/' against the domain root.
        if result == -1:
            url = tutorial_url_head + href
        else:
            url = domain + href
        if 'django' in url:
            urls.append(url)
    return urls


def find_last(string, char):
    """Return the index of the last occurrence of char in string."""
    last_position = -1
    while True:
        position = string.find(char, last_position + 1)
        if position == -1:
            return last_position
        last_position = position


def get_domain(url):
    r = urlparse(url)
    return r.scheme + "://" + r.netloc


def parse_url_to_html(url, name):
    """Fetch a URL, extract the article body and save it as a local HTML file.

    :param url: the URL to parse
    :param name: the file name to save the HTML under
    :return: path of the saved HTML file
    """
    try:
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')
        # Main article body
        body = soup.find_all(class_="w-col l10 m12")
        h = str(body)
        html = h[1:-1]  # strip the surrounding [ ] of the list repr
        html = html_template.format(content=html)
        html = html.encode("utf-8")
        title = soup.title.get_text()
        print(url)
        html_path = '{}/{}'.format(os.path.join(os.path.dirname(__file__), 'html'), name)
        with open(html_path, 'wb') as f:
            f.write(html)
        return html_path
    except Exception as e:
        print(e)


def save_pdf(htmls, file_name):
    """Render all the saved HTML files into one PDF.

    :param htmls: list of HTML file paths
    :param file_name: output PDF file name
    """
    options = {
        'page-size': 'Letter',
        'margin-top': '0.75in',
        'margin-right': '0.75in',
        'margin-bottom': '0.75in',
        'margin-left': '0.75in',
        'encoding': "UTF-8",
        'custom-header': [
            ('Accept-Encoding', 'gzip')
        ],
        'cookie': [
            ('cookie-name1', 'cookie-value1'),
            ('cookie-name2', 'cookie-value2'),
        ],
        'outline-depth': 10,
    }
    pdfkit.from_file(htmls, file_name, options=options)


def main():
    start = time.time()
    urls = get_url_list(start_url)
    htmls = [parse_url_to_html(url, str(index) + ".html")
             for index, url in enumerate(urls)]
    print(htmls)
    try:
        save_pdf(htmls, 'crawler_{}.pdf'.format(time.strftime('%Y_%m_%d_%H_%M_%S')))
    except Exception as e:
        print(e)
    for html in htmls:
        os.remove(html)
    total_time = time.time() - start
    print(u"Total time: {0:.2f}s".format(total_time))


main()
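Note that the w-col l10 m12 class used in parse_url_to_html() to locate the article body is specific to ziqiangxuetang.com's page layout, as is the 'django' substring filter in get_url_list(); to convert a different site, replace both with selectors that match that site's markup.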
IOError: ‘No wkhtmltopdf executable found’
Make sure wkhtmltopdf is on your system path ($PATH), or has been set via a configuration (see above). Running where wkhtmltopdf on Windows or which wkhtmltopdf on Linux will return the exact location of the wkhtmltopdf binary.
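If you cannot (or prefer not to) put the binary on $PATH, pdfkit also accepts an explicit location via pdfkit.configuration(). A minimal sketch, assuming the binary lives at /usr/local/bin/wkhtmltopdf (substitute the path that which/where reports on your machine):

import pdfkit

# Hypothetical location; use the path that `which wkhtmltopdf` prints.
config = pdfkit.configuration(wkhtmltopdf='/usr/local/bin/wkhtmltopdf')
pdfkit.from_url('http://google.com', 'out.pdf', configuration=config)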
IOError: ‘Command Failed’
This error means PDFKit could not process one of its inputs. You can try running the command printed after the error message yourself to see what caused the failure (some versions of wkhtmltopdf die with a segmentation fault while processing).
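If the failure only happens somewhere in a batch of pages, converting the inputs one at a time makes it easy to isolate the bad one. A small sketch (the URL list is a stand-in for your own inputs):

import pdfkit

urls = ['http://google.com', 'http://example.com/broken-page']  # stand-in inputs
for index, url in enumerate(urls):
    try:
        pdfkit.from_url(url, 'page_{}.pdf'.format(index))
    except (IOError, OSError) as e:
        # the exception message includes the failing wkhtmltopdf command line
        print('conversion failed for {}: {}'.format(url, e))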
The PDF is generated successfully, but Chinese characters come out garbled
Add a charset declaration, <meta charset="UTF-8">, inside the <head> of the HTML (the html_template in the script above already carries it).
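It also helps to pass the encoding option that save_pdf() above already uses, so that wkhtmltopdf is told explicitly how to read the input; a minimal standalone version:

import pdfkit

# Tell wkhtmltopdf to read the input as UTF-8 so CJK text is not mangled.
options = {'encoding': 'UTF-8'}
pdfkit.from_file('test.html', 'out.pdf', options=options)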
Zhijun's project: https://github.com/lzjun567/crawler_html2pdf