Python Website Admin-Panel Scanner Script


#!/usr/bin/python
#coding=utf-8
# Simple admin-panel scanner (Python 2): append each wordlist entry to the
# base URL and record the paths that answer with 200 or 301.
import urllib

url = "http://123.207.123.228/"
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"  # wordlist, one path per line
open_url = []
all_url = []

def search_url(url, txt):
    with open(txt, "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = url + each
            all_url.append(urllist)
            print("Checking: " + urllist + '\n')
            try:
                req = urllib.urlopen(urllist)
                if req.getcode() == 200:
                    open_url.append(urllist)
                if req.getcode() == 301:
                    open_url.append(urllist)
            except:
                pass

def main():
    search_url(url, txt)
    if open_url:
        print("Admin panel found at:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("No admin panel found")

if __name__ == "__main__":
    main()
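
A side note: urllib.urlopen only exists in Python 2. On Python 3 the same request goes through urllib.request instead; a minimal sketch of the same check, assuming the same wordlist path:

#!/usr/bin/python3
# Python 3 sketch of the same scan. urllib.request.urlopen raises HTTPError
# for 4xx/5xx and follows redirects, so errors are caught rather than checked.
import urllib.request

url = "http://123.207.123.228/"
wordlist = r"C:\Users\ww\Desktop\houtaiphp.txt"  # same wordlist as above
open_url = []

with open(wordlist, "r") as f:
    for line in f:
        target = url + line.strip()
        try:
            req = urllib.request.urlopen(target, timeout=5)
            if req.getcode() in (200, 301):  # mirrors the original's check
                open_url.append(target)
        except Exception:
            pass  # unreachable target or non-2xx response

for each in open_url:
    print("[+]" + each)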

The same scan, refactored: the request logic moves into its own handle_url function, which will make it easy to hand each URL to a thread later.
#!/usr/bin/python
#coding=utf-8
# Same scanner, refactored: handle_url() does the request so it can later
# serve as a thread target.
import urllib

url = "http://123.207.123.228/"
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"  # wordlist, one path per line
open_url = []
all_url = []

def search_url(url, txt):
    with open(txt, "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = url + each
            all_url.append(urllist)
            handle_url(urllist)

def handle_url(urllist):
    print("Checking: " + urllist + '\n')
    try:
        req = urllib.urlopen(urllist)
        if req.getcode() == 200:
            open_url.append(urllist)
        if req.getcode() == 301:
            open_url.append(urllist)
    except:
        pass

def main():
    search_url(url, txt)
    if open_url:
        print("Admin panel found at:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("No admin panel found")

if __name__ == "__main__":
    main()

My mentor told me to keep studying --> multithreading

So let's add multithreading here.

#!/usr/bin/python
#coding=utf-8
# Multithreaded version: build the URL list first, then hand each URL to
# its own thread.
import urllib
import time
import threading

url = "http://123.207.123.228/"
txt = r"C:\Users\ww\Desktop\houtaiphp.txt"  # wordlist, one path per line
open_url = []
all_url = []
threads = []

def search_url(url, txt):
    with open(txt, "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = url + each
            all_url.append(urllist)

def handle_url(urllist):
    print("Checking: " + urllist + '\n')
    try:
        req = urllib.urlopen(urllist)
        if req.getcode() == 200:
            open_url.append(urllist)
        if req.getcode() == 301:
            open_url.append(urllist)
    except:
        pass

def main():
    search_url(url, txt)
    # one thread per URL; join() waits for all of them to finish
    for each in all_url:
        t = threading.Thread(target=handle_url, args=(each,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    if open_url:
        print("Admin panel found at:")
        for each in open_url:
            print("[+]" + each)
    else:
        print("No admin panel found")

if __name__ == "__main__":
    start = time.clock()
    main()
    end = time.clock()
    print("spend time is:%.3f seconds" % (end - start))

The printed run time makes it easy to compare the threaded and unthreaded versions.
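
One caveat on the timing code: time.clock() works on Python 2, but it was deprecated and then removed in Python 3.8. A Python 3 version of the same measurement would use time.perf_counter():

import time

start = time.perf_counter()
main()
end = time.perf_counter()
print("spend time is:%.3f seconds" % (end - start))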

----------------------------------------------------------------------------------------------------

Searching with ZoomEye

Calling the ZoomEye API to retrieve information.

The main modules involved are requests, json, and os.

# coding: utf-8
import os
import requests
import json

access_token = ''
ip_list = []

def login():
    """
        Prompt for a username and password and log in.
    :return: the access_token used to authorize API calls
    """
    user = raw_input('[-] input : username :')
    passwd = raw_input('[-] input : password :')
    data = {
        'username' : user,
        'password' : passwd
    }
    data_encoded = json.dumps(data)  # dumps() serializes a Python object to a JSON string
    try:
        r = requests.post(url = 'https://api.zoomeye.org/user/login', data = data_encoded)
        r_decoded = json.loads(r.text)  # loads() parses a JSON string into a Python object
        global access_token
        access_token = r_decoded['access_token']
    except Exception as e:
        print '[-] info : username or password is wrong, please try again '
        exit()

def saveStrToFile(path, content):
    """
        Write a string to a file.
    :return:
    """
    with open(path, 'w') as output:
        output.write(content)

def saveListToFile(path, lines):
    """
        Write a list to a file, one item per line.
    :return:
    """
    s = '\n'.join(lines)
    with open(path, 'w') as output:
        output.write(s)

def apiTest():
    """
        Exercise the search API.
    :return:
    """
    page = 1
    global access_token
    with open('access_token.txt', 'r') as f:
        access_token = f.read()
    # put the token into the HTTP Authorization header
    headers = {
        'Authorization' : 'JWT ' + access_token,
    }
    while(True):
        try:
            r = requests.get(url = 'https://api.zoomeye.org/host/search?query="phpmyadmin"&facet=app,os&page=' + str(page),
                         headers = headers)
            r_decoded = json.loads(r.text)
            for x in r_decoded['matches']:
                print x['ip']
                ip_list.append(x['ip'])
            print '[-] info : count ' + str(page * 10)  # each page holds 10 results
        except Exception as e:
            # stop when the response has no 'matches' key: either the search
            # is exhausted or the account hit the API's result limit
            if str(e.message) == 'matches':
                print '[-] info : account was break, exceeding the max limitations'
                break
            else:
                print '[-] info : ' + str(e.message)
        else:
            if page == 10:
                break
            page += 1

def main():
    # log in first if there is no cached access token
    if not os.path.isfile('access_token.txt'):
        print '[-] info : access_token file is not exist, please login'
        login()
        saveStrToFile('access_token.txt', access_token)

    apiTest()
    saveListToFile('ip_list.txt', ip_list)

if __name__ == '__main__':
    main()
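
A note on how the token cache works: the first run prompts for ZoomEye credentials and saves the JWT to access_token.txt, and every later run reuses that file, so if the token expires, delete the file to force a fresh login. The 'matches' KeyError trick in apiTest() is what ends the loop: when a response carries an error instead of results, indexing r_decoded['matches'] raises, and the handler treats that as the stop signal.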

 

The script above searches for phpmyadmin. The IPs it finds are saved to the ip_list.txt file in the same directory.

Not every IP returned is actually reachable, though, so here is a script that checks each one for phpMyAdmin and prints those that respond.

#!/usr/bin/python
#coding=utf-8
# Probe each IP from ip_list.txt for an exposed phpMyAdmin login page.
import requests

headers = {'User-Agent':"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"}  # browser User-Agent
open_url = []
all_url = []
prefix = 'http://'
suffix = '/phpmyadmin/index.php'

def search_url():
    with open(r"C:\Users\ww\Desktop\ip_list.txt", "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = prefix + each + suffix
            all_url.append(urllist)
            handle_url(urllist)

def handle_url(urllist):
    try:
        start_htm = requests.get(urllist, headers=headers)
        if start_htm.status_code == 200:
            print '*******************************************'
            print urllist
    except:
        pass

if __name__ == "__main__":
    search_url()
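
One caveat with this check: requests follows redirects by default, so a host that redirects /phpmyadmin/index.php somewhere else can still come back as 200. To count only direct hits, you could disable redirects; a sketch with a hypothetical helper is_phpmyadmin:

import requests

def is_phpmyadmin(url, headers):
    # only a direct 200 counts; 3xx responses are not followed, so a
    # redirect to a different page is treated as a miss
    try:
        r = requests.get(url, headers=headers, allow_redirects=False, timeout=5)
        return r.status_code == 200
    except requests.RequestException:
        return False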

Let's add multithreading, since there is a lot of work to get through.

#!/usr/bin/python
#coding=utf-8
# Multithreaded phpMyAdmin probe: one thread per IP from ip_list.txt.
import time
import requests
import threading

headers = {'User-Agent':"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0"}  # browser User-Agent
open_url = []
all_url = []
threads = []
prefix = 'http://'
suffix = '/phpmyadmin/index.php'

def search_url():
    with open(r"C:\Users\ww\Desktop\ip_list.txt", "r") as f:
        for each in f:
            each = each.replace('\n', '')
            urllist = prefix + each + suffix
            all_url.append(urllist)

def handle_url(urllist):
    try:
        start_htm = requests.get(urllist, headers=headers)
        if start_htm.status_code == 200:
            print '*******************************************'
            print urllist
    except:
        pass

def main():
    search_url()
    for each in all_url:
        t = threading.Thread(target=handle_url, args=(each,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()

if __name__ == "__main__":
    start = time.clock()
    main()
    end = time.clock()
    print("spend time is %.3f seconds" % (end - start))

This makes things a lot more convenient.

There is still a long way to go!
