At last week's 铁人三项 (information-security triathlon) competition, I almost missed a chance at a code audit because of an exposed .git directory. 御剑 is an excellent scanner, but just to show off I decided to write my own (purely for my own amusement).
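For context, the quickest manual check for an exposed .git directory is simply requesting .git/HEAD: if it comes back readable and starts with "ref:", the repository can usually be pulled. A minimal standalone sketch of that check (the target URL is a placeholder, and this is not part of the scanner below):

# -*-coding:utf-8 -*-
# Quick check for an exposed .git directory (separate from backscanner).
import requests

def git_exposed(base_url):
    try:
        resp = requests.get(base_url.rstrip("/") + "/.git/HEAD", timeout=5)
    except requests.RequestException:
        return False
    # A readable HEAD that starts with "ref:" strongly suggests the repo is exposed.
    return resp.status_code == 200 and resp.text.strip().startswith("ref:")

print git_exposed("http://example.com")  # placeholder target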
Code:
# -*-coding:utf-8 -*-
import sys
import re
import urlparse
import os
import requests
import time
import datetime
def print_banner():
    print "*"*62
    print '''* _ _ *
* | |__ __ _ ___| | _____ ___ __ _ _ __ _ __ ___ _ __ *
* | '_ \ / _` |/ __| |/ / __|/ __/ _` | '_ \| '_ \ / _ \ '__|*
* | |_) | (_| | (__| <\__ \ (__ (_| | | | | | | | __/ | *
* |_.__/ \__,_|\___|_|\_\___/\___\__,_|_| |_|_| |_|\___|_| *'''
    print "*"*62
    print "Usage: backscanner.py [url] [lists.txt path]"
def get_url_and_list():
    # Take the target URL and wordlist path from argv, or prompt for whatever is missing.
    if len(sys.argv)==1:
        url=raw_input("Please input the URL you wanna test:\n")
        path_of_list=raw_input("Please input the path of lists:\n")
    elif len(sys.argv)==2:
        url=sys.argv[1]
        path_of_list=raw_input("Please input the path of lists:\n")
    elif len(sys.argv)==3:
        url=sys.argv[1]
        path_of_list=sys.argv[2]
    else:
        print "Out of parameter range"
        sys.exit(0)
    url=url.lower()
    # Keep only the scheme and host, so dictionary entries are appended to the site root.
    if re.match(r"^https?:\/{2}\w.+/?[\w.]?$",url):
        urls=urlparse.urlsplit(url)
        #print urls
        url=urls.scheme+"://"+urls.netloc+"/"
        print "[Target] "+url
    else:
        print "[Wrong] Bad URL!"
        sys.exit(0)
    if os.path.exists(path_of_list):
        if os.path.isfile(path_of_list):
            print "[ List ] "+path_of_list
        else:
            print "[Wrong] Bad path of list"
            sys.exit(0)
    else:
        print "[Wrong] The input file might not exist"
        sys.exit(0)
    print "+"+"-"*60+"+"
    print "[ warn ] Scanning now! Please wait......"
    print "+"+"-"*60+"+"
    return (url,path_of_list)
def get_dict(path):
    # Read the wordlist, one path per line, stripping trailing newlines.
    lists=[]
    f=open(path)
    line=f.readline()
    while line:
        lists.append(line.strip('\n'))
        line=f.readline()
    f.close()
    return lists
def scanner(url,lists):
    dicts={}
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate, compress',
               'Accept-Language': 'en-us;q=0.5,en;q=0.3',
               'Cache-Control': 'max-age=0',
               'Connection': 'keep-alive',
               'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
    for i in lists:
        # Avoid a double slash: the target URL already ends with "/".
        if i.startswith("/"):
            req_url=url+i[1:]
        else:
            req_url=url+i
        resp=requests.get(req_url,headers=headers,allow_redirects=False)
        # 200, 3xx and 403 are all treated as "something is there".
        if resp.status_code==200 or resp.status_code==403 or str(resp.status_code)[0]=="3":
            print "["+str(resp.status_code)+"] "+req_url
            dicts[req_url]=str(resp.status_code)
    if len(dicts)==0:
        print "No files found!"
    return dicts
def in_log(dicts):
    # Write every hit and its status code to scanlog.txt.
    f=open("scanlog.txt",'w+')
    for i in dicts:
        f.write(i+" "+dicts[i]+"\n")
    f.close()
    print
def main():
    print_banner()
    print
    print "[Start Time] "+time.asctime(time.localtime(time.time()))
    start_time=datetime.datetime.now()
    url,path=get_url_and_list()
    lists=get_dict(path)
    print
    dicts=scanner(url,lists)
    in_log(dicts)
    print "+"+"-"*60+"+"
    print "[ End Time ] "+time.asctime(time.localtime(time.time()))
    print "[ timeused ] "+str((datetime.datetime.now()-start_time).seconds)+" s"
    print "[ warn ] The results have been saved to scanlog.txt"
    print "+"+"-"*60+"+"

if __name__=="__main__":
    main()
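The script expects a plain-text wordlist with one path per line. The entries below are only an illustration of the kind of list it can be fed (backup archives and VCS leftovers), not the actual dictionary I used:

.git/HEAD
.svn/entries
.DS_Store
www.zip
www.rar
wwwroot.zip
backup.zip
web.config.bak
index.php.bak

Run it as, for example, python backscanner.py http://example.com/ lists.txt; hits are printed with their status codes and written to scanlog.txt.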
I'm clearly still not comfortable with regular expressions; I spent a long time checking the pattern above against a regex cheat sheet.
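For anyone else fighting the same pattern, the quickest sanity check is just to throw a few URLs at it (the test URLs here are made up; note that escaping "/" is unnecessary in Python regexes, though harmless):

# -*-coding:utf-8 -*-
# Ad-hoc test of the URL pattern used in get_url_and_list().
import re

pattern = re.compile(r"^https?:\/{2}\w.+/?[\w.]?$")
for candidate in ["http://example.com/",
                  "https://example.com/admin/index.php",
                  "ftp://example.com/",
                  "example.com"]:
    print candidate, "->", bool(pattern.match(candidate))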
Anyway, this was mostly a chance to brush up on Python. I didn't add multithreading, since Python's threads are a bit underwhelming, and plain GET requests felt slightly more reliable (a rough sketch of a threaded version follows below all the same).
The speed is passable; for a slow-responding site, 御剑 is still the better choice :)
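Purely as a sketch of what a threaded version could look like (the check_one helper, the pool size of 10 and the 5-second timeout are my own choices, not something the script above does), multiprocessing.dummy gives a thread pool with almost no extra code:

# -*-coding:utf-8 -*-
# Rough sketch: run the per-URL checks in a thread pool instead of a plain loop.
import requests
from multiprocessing.dummy import Pool  # thread-based Pool, same API as multiprocessing

def check_one(req_url):
    try:
        resp = requests.get(req_url, allow_redirects=False, timeout=5)
    except requests.RequestException:
        return None
    # Same filter as the scanner above: keep 200, 3xx and 403 responses.
    if resp.status_code in (200, 403) or str(resp.status_code)[0] == "3":
        return (req_url, str(resp.status_code))
    return None

def scan_threaded(url, lists, workers=10):
    req_urls = [url + (i[1:] if i.startswith("/") else i) for i in lists]
    pool = Pool(workers)
    try:
        results = pool.map(check_one, req_urls)
    finally:
        pool.close()
        pool.join()
    return dict(r for r in results if r)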
Screenshot:
Download:
backscanner