#!/usr/bin/env python
# coding: utf-8
"""Fetch a web page and print every hyperlink found on it."""
from bs4 import BeautifulSoup
import urllib.request

# NOTE(review): the original contained a Python 2 `reload(sys)` /
# `sys.setdefaultencoding` hack; it is a no-op on Python 3 and the `imp`
# module it relied on was removed in 3.12, so it has been dropped.

# The page to crawl.
url = 'https://www.wikipedia.org/'
def findAllLink(url):
    """Fetch *url* and return a list of absolute hyperlinks on the page.

    Hrefs starting with '/' are resolved against the page's scheme and
    host; fragment-only hrefs starting with '#' are appended to *url*;
    hrefs starting with 'j' (javascript: pseudo-links) are dropped, as in
    the original filter.

    :param url: absolute URL of the page to scan.
    :return: list of href strings, absolutized where possible.
    """
    # Local import keeps this fix self-contained; urlsplit replaces the
    # private urllib.request.splittype/splithost helpers, which were
    # removed in Python 3.9.
    from urllib.parse import urlsplit

    parts = urlsplit(url)
    proto, domain = parts.scheme, parts.netloc

    # Download the raw HTML of the page.
    html = urllib.request.urlopen(url).read()

    # Collect every <a> tag; naming the parser explicitly avoids bs4's
    # "no parser specified" warning and makes output deterministic.
    anchors = BeautifulSoup(html, 'html.parser').findAll('a')

    # Keep only anchors that actually carry a non-empty href and are not
    # javascript: pseudo-links (the original tested href[0] != 'j', which
    # crashed on missing or empty hrefs).
    hrefs = [a['href'] for a in anchors
             if a.get('href') and not a['href'].startswith('j')]

    # Absolutize: '/path' -> '<proto>://<domain>/path', '#frag' -> url + '#frag'.
    result = []
    for href in hrefs:
        if href[0] == '/':
            result.append(proto + '://' + domain + href)
        elif href[0] == '#':
            result.append(url + href)
        else:
            result.append(href)
    # Return a concrete list (the original's `map` object was lazy, and its
    # `return alistrest` was a NameError from the corrupted source).
    return result
if __name__ == '__main__':
    # Crawl the configured page and print each extracted link, one per line.
    for link in findAllLink(url):
        print(link)