Tools
These tools are not a replacement for manually scanning and investigating systems.

Yuki Chan – Automated Penetration Testing and Auditing Tool

Yuki

It runs the following programs against your chosen target:

1
Whois domain analyzer
2
Nslookup
3
Nmap
4
TheHarvester
5
Metagoofil
6
DNSRecon
7
Sublist3r
8
Wafw00f
9
WAFNinja
10
XSS Scanner
11
WhatWeb
12
Spaghetti
13
WPscan
14
WPscanner
15
WPSeku
16
Droopescan ( CMS Vulnerability Scanner WordPress, Joomla, Silverstripe, Drupal, And Moodle)
17
SSLScan
18
SSLyze
19
A2SV
20
Dirsearch
Copied!

Install and Run

1
git clone https://github.com/Yukinoshita47/Yuki-Chan-The-Auto-Pentest.git; cd Yuki-Chan-The-Auto-Pentest; chmod 777 wafninja joomscan install-perl-module.sh yuki.sh ; chmod 777 Module/WhatWeb/whatweb; pip install -r requirements.txt; ./install-perl-module.sh; ./yuki.sh
Copied!

Bluto

GitHub - darryllane/Bluto: DNS Recon | Brute Forcer | DNS Zone Transfer | DNS Wild Card Checks | DNS Wild Card Brute Forcer | Email Enumeration | Staff Enumeration | Compromised Account Checking
GitHub
If you get an error when running it that relates to the file below, replace that file's contents with the config shown underneath.

error

1
/usr/local/lib/python2.7/dist-packages/Bluto/modules/data_mine.py
Copied!

config

1
import pdfminer
2
import requests
3
import urllib2
4
import olefile
5
import os
6
import traceback
7
import time
8
import re
9
import random
10
import math
11
import sys
12
import Queue
13
import time
14
import threading
15
import cgi
16
from termcolor import colored
17
from pdfminer.pdfparser import PDFParser
18
from pdfminer.pdfdocument import PDFDocument
19
from bs4 import BeautifulSoup
20
from bluto_logging import info, INFO_LOG_FILE
21
from get_file import get_user_agents
22
from search import doc_bing, doc_exalead
23
from general import get_size
24
25
26
27
def action_download(doc_list, docs):
    """Download every live document URL in ``doc_list`` into the ``docs`` directory.

    Returns the list of URLs (with ``%20`` decoded back to spaces) that were
    successfully written to disk. Python 2 code: uses print statements.
    """
    info('Document Download Started')
    i = 0                     # count of files actually written; gates the summary below
    download_list = []        # URLs successfully downloaded (returned to caller)
    initial_count = 0         # same counter, used for the progress display
    print 'Gathering Live Documents For Metadata Mining\n'
    # Browser-like headers so document hosts serve the file normally.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.0; pl; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2 GTB7.1 ( .NET CLR 3.5.30729',
        'Referer': 'https://www.google.co.uk/',
        'Accept-Language': 'en-US,en;q=0.5',
        'Cache-Control': 'no-cache'
    }
    for doc in doc_list:
        doc = doc.replace(' ', '%20')  # crude URL-encoding: spaces only
        try:
            # NOTE(review): verify=False disables TLS certificate validation.
            r = requests.get(doc.encode('utf-8'), headers=headers, verify=False)
            if r.status_code == 404:
                r.raise_for_status()  # raises HTTPError, swallowed/logged below

            if r.status_code == 200:
                # Last element of cgi.parse_header() is the parameter dict.
                params = cgi.parse_header(r.headers.get('Content-Disposition', ''))[-1]
                if 'filename' not in params:
                    # No filename advertised: fall back to the last URL path segment.
                    filename = str(doc).replace('%20', ' ').split('/')[-1]
                    with open(docs + filename, "w") as code:
                        i += 1
                        code.write(r.content)
                        code.close()  # redundant inside `with`; kept as-is
                    initial_count += 1
                    # Trailing comma = Python 2 "print without newline" (progress line).
                    print('\tDownload Count: {}\r'.format(str(initial_count))),
                    info(str(doc).replace('%20', ' '))
                    download_list.append(str(doc).replace('%20', ' '))
                    continue
                else:
                    # Take the filename from the Content-Disposition header.
                    filename_t = re.search('filename="(.*)"', r.headers['content-disposition'])
                    filename = filename_t.group(1)  # AttributeError if no match -> caught below
                    with open(docs + filename, "w") as code:
                        i += 1
                        code.write(r.content)
                        code.close()  # redundant inside `with`; kept as-is
                    initial_count += 1
                    print('\tDownload Count: {}\r'.format(str(initial_count))),
                    download_list.append(str(doc).replace('%20', ' '))
                    info(str(doc).replace('%20', ' '))
                    continue

        # Best-effort scraping: each failure mode is logged (or ignored) and the
        # loop moves on to the next document.
        except ValueError:
            info('No Filename in header')
            pass
        except AttributeError:
            pass
        except IOError:
            info('Not Found: {}'.format(str(doc).replace('%20', ' ')))
            pass
        except requests.exceptions.HTTPError:
            info('Error: File Not Found Server Side: HTTPError')
            pass
        except requests.exceptions.ConnectionError:
            info('Error: File Not Found Server Side: ConnectionError')
            pass
        except KeyError:
            pass
        except UnboundLocalError:
            pass
        except Exception:
            info('An Unhandled Exception Has Occured, Please Check The Log For Details\n' + INFO_LOG_FILE)
            info(str(doc).replace('%20', ' '))
            pass
    if i < 1:
        # Nothing downloaded: skip the size summary entirely.
        return download_list
    data_size = get_size(docs)
    print '\tData Downloaded: {}MB'.format(str(math.floor(data_size)))
    info('Documents Downloaded: {}'.format(initial_count))
    return download_list
103
104
105
def doc_search(domain, USERAGENT_F, prox):
    """Run the Bing and Exalead document searches concurrently and merge the hits."""
    bing_q = Queue.Queue()
    exalead_q = Queue.Queue()
    workers = [
        threading.Thread(target=doc_bing, args=(domain, USERAGENT_F, prox, bing_q)),
        threading.Thread(target=doc_exalead, args=(domain, USERAGENT_F, prox, exalead_q)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # Each searcher puts exactly one result list on its queue.
    return bing_q.get() + exalead_q.get()
118
119
120
#Extract Author PDF
def pdf_read(pdf_file_list):
    """Extract Author (user names) and Creator (software) metadata from PDFs.

    Returns a ``(user_names, software_list)`` tuple. Files that cannot be
    parsed are skipped; unexpected errors are logged and skipped.
    """
    info('Extracting PDF MetaData')
    software_list = []
    user_names = []
    for filename in pdf_file_list:
        info(filename)
        fp = None
        try:
            fp = open(filename, 'rb')
            parser = PDFParser(fp)
            doc = PDFDocument(parser)
            software = re.sub('[^0-9a-zA-Z]+', ' ', doc.info[0]['Creator'])
            person = re.sub('[^0-9a-zA-Z]+', ' ', doc.info[0]['Author'])
            if person:
                # Collapse spaced-out single letters ("J o h n") back into one word.
                oddity = re.match('(\s\w\s+(\w\s+)+\w)', person)
                if oddity:
                    oddity = str(oddity.group(1)).replace(' ', '')
                    user_names.append(str(oddity).title())
                else:
                    user_names.append(str(person).title())
            if software:
                oddity2 = re.match('(\s\w\s+(\w\s+)+\w)', software)
                if oddity2:
                    oddity2 = str(oddity2.group(1)).replace(' ', '')
                    software_list.append(oddity2)
                else:
                    software_list.append(software)
        except IndexError:
            continue
        except pdfminer.pdfparser.PDFSyntaxError:
            continue
        except KeyError:
            continue
        except TypeError:
            continue
        except Exception:
            info('An Unhandled Exception Has Occured, Please Check The Log For Details' + INFO_LOG_FILE)
            continue
        finally:
            # BUG FIX: the original never closed fp, leaking one file handle
            # per PDF processed.
            if fp is not None:
                fp.close()
    info('Finished Extracting PDF MetaData')
    return (user_names, software_list)
161
162
163
164
#Extract Author MS FILES
def ms_doc(ms_file_list):
    """Extract author / last-saved-by (user names) and creating-application
    (software) metadata from MS Office files via OLE properties.

    Returns a ``(user_names, software_list)`` tuple; any per-file failure
    silently skips that file (best-effort).
    """
    software_list = []
    user_names = []
    info('Extracting MSDOCS MetaData')

    def _collapse(value):
        # Collapse spaced-out single letters ("J o h n") into one word;
        # returns None when the value is not in that form.
        spaced = re.match('(\s\w\s+(\w\s+)+\w)', value)
        if spaced:
            return str(spaced.group(1)).replace(' ', '')
        return None

    for filename in ms_file_list:
        try:
            data = olefile.OleFileIO(filename)
            meta = data.get_metadata()
            author = re.sub('[^0-9a-zA-Z]+', ' ', meta.author)
            # `company` is unused, but re.sub raising TypeError on a None
            # attribute deliberately skips the whole file via the except below.
            company = re.sub('[^0-9a-zA-Z]+', ' ', meta.company)
            software = re.sub('[^0-9a-zA-Z]+', ' ', meta.creating_application)
            save_by = re.sub('[^0-9a-zA-Z]+', ' ', meta.last_saved_by)

            if author:
                fixed = _collapse(author)
                if fixed:
                    user_names.append(str(fixed).title())
                else:
                    user_names.append(str(author).title())
            if software:
                fixed = _collapse(software)
                if fixed:
                    software_list.append(fixed)
                else:
                    software_list.append(software)
            if save_by:
                fixed = _collapse(save_by)
                if fixed:
                    user_names.append(str(fixed).title())
                else:
                    user_names.append(str(save_by).title())
        except Exception:
            pass
    info('Finished Extracting MSDOC MetaData')
    return (user_names, software_list)
204
205
#Modules takes in DOMAIN, PROX, USERAGENTS outputs user_names, software_list
def doc_start(domain, USERAGENT_F, prox, q):
    """Search for, download, and metadata-mine documents for *domain*.

    Always puts exactly one item on *q*: either a
    ``(user_names, software_list, download_count, download_list)`` tuple, or
    ``None`` when no documents / no metadata were found.
    """
    ms_list_ext = ('.docx', '.pptx', '.xlsx', '.doc', '.xls', '.ppt')
    ms_file_list = []
    pdf_file_list = []
    info('Let The Hunt Begin')
    domain_r = domain.split('.')
    if not os.path.exists(os.path.expanduser('~/Bluto/doc/{}'.format(domain_r[0]))):
        os.makedirs(os.path.expanduser('~/Bluto/doc/{}'.format(domain_r[0])))

    location = os.path.expanduser('~/Bluto/doc/{}/'.format(domain_r[0]))
    info('Data Folder Created ' + location)
    docs = os.path.expanduser(location)
    doc_list = doc_search(domain, USERAGENT_F, prox)

    if not doc_list:
        # No documents found at all — signal the consumer and stop.
        q.put(None)
        return
    doc_list = set(doc_list)  # dedupe (sorting before set() was a no-op)
    download_list = action_download(doc_list, docs)
    download_count = len(download_list)

    for root, dirs, files in os.walk(docs):
        for filename in files:
            if str(filename).endswith(ms_list_ext):
                ms_file_list.append(os.path.join(root, filename))
            elif str(filename).endswith('.pdf'):
                pdf_file_list.append(os.path.join(root, filename))

    # Accumulate results from whichever extractors have files to work on.
    user_names_t = []
    software_list_t = []
    if ms_file_list:
        names_ms, soft_ms = ms_doc(ms_file_list)
        user_names_t += names_ms
        software_list_t += soft_ms
    if pdf_file_list:
        names_pdf, soft_pdf = pdf_read(pdf_file_list)
        user_names_t += names_pdf
        software_list_t += soft_pdf

    if user_names_t or software_list_t:
        user_names = sorted(set(user_names_t))
        software_list = sorted(set(software_list_t))
        info('The Hunt Ended')
        q.put((user_names, software_list, download_count, download_list))
    else:
        # BUG FIX: the original ended with
        #   elif (user_names_t and software_list) is None:
        # which referenced the possibly-unbound `software_list` and could never
        # be True, so when no metadata was recovered nothing was put on the
        # queue and the caller's q.get() blocked forever.
        q.put(None)
272
Copied!

The Offensive Web Application Penetration Testing Framework.

1
https://github.com/0xInfection/TIDoS-Framework
Copied!
1
https://github.com/carlospolop/legion
Copied!