
Censys API limit bypass

Giulio 5 years ago
parent
commit 36e8ead932
5 changed files with 59 additions and 4 deletions
  1. acasown.py (+3 -3)
  2. bong/__init__.pyc (BIN)
  3. censys/__init__.py (+56 -1)
  4. censys/__init__.pyc (BIN)
  5. ripe/__init__.pyc (BIN)

+ 3 - 3
acasown.py

@@ -3,10 +3,10 @@ import censys
 import bong
 
 r = ripe.Ripe()
-c = censys.Censys("dummy", "dummy")
+c = censys.Censys_WEB("dummyuser", "dummypass")
 targets = r.search("trenitalia")
 print("Found " + str(len(targets)) + " ranges from Ripe")
 hosts = c.search_ipv4(c.build_query_ipv4(targets))
 print("Found " + str(len(hosts)) + " hosts from Censys")
-for i in hosts:
-	print(i)
+for host in hosts:
+	print(host)
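
For reference, the driver script after this change reads roughly as below. This is a sketch: the first two lines sit above the hunk and are assumed to be the ripe and censys imports, and the credentials are the dummy placeholders from the diff.

import ripe
import censys
import bong

# Pull address ranges for the organisation from RIPE, then hand the
# ranges to the Censys web-search scraper and print each host found.
r = ripe.Ripe()
c = censys.Censys_WEB("dummyuser", "dummypass")
targets = r.search("trenitalia")
print("Found " + str(len(targets)) + " ranges from Ripe")
hosts = c.search_ipv4(c.build_query_ipv4(targets))
print("Found " + str(len(hosts)) + " hosts from Censys")
for host in hosts:
    print(host)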

BIN
bong/__init__.pyc


+ 56 - 1
censys/__init__.py

@@ -1,6 +1,8 @@
 import requests
+from bs4 import BeautifulSoup
+import re
 
-class Censys:
+class Censys_API:
 	def __init__(self, uid, secret):
 		self.url = 'https://censys.io/api/v1'
 		self.uid = uid
@@ -44,4 +46,57 @@ class Censys:
 			except:
 				vhosts  = []
 			self.ipv4.append({'ip': host['ip'], 'protocols': host['protocols'], 'vhosts': vhosts})
+		return True
+
+class Censys_WEB:
+	def __init__(self, username, password):
+		self.url = 'https://censys.io/'
+		self.username = username
+		self.password = password
+		if self.login():
+			self.session = self.login()
+		self.ipv4 = []
+
+	def login(self):
+		s = requests.session()
+		requests.get(self.url)
+		return s
+
+	def build_query_ipv4(self, targets):
+		query = ""
+		for t in targets:
+			query += "ip:[" + t['start'] + " TO " + t['end'] + "]"
+			query += " OR "
+		return query[:-4]
+
+	def search_ipv4(self, query):
+		r = self.session.get(self.url + "ipv4/_search?q=", params={"q": query, "page": 1})
+		data = r.text
+		'''To use etree, the broken HTML would have to be fixed first
+		data = "<root>" + data + "</root>"
+		data = re.sub("\<a\ href=\/.*\>.*\<\/a\>", "", data)'''
+		self.parse_ipv4(data)
+		html = BeautifulSoup(data, "lxml")
+		spans = html.find_all('span', {'class': 'SearchResultSectionHeader__statistic'})
+		pages = int(spans[0].text.split('/')[1].strip())
+		count = spans[1].text
+		for page in range(2, pages + 1):
+			r = self.session.get(self.url + "ipv4/_search?q=", params={"q": query, "page": page})
+			data = r.text
+			self.parse_ipv4(data)
+		return self.ipv4
+
+	def parse_ipv4(self, data):
+		html = BeautifulSoup(data, "lxml")
+		vhosts = []
+		protocols = []
+		results = html.find_all('div', {'class': 'SearchResult result'})
+		for raw in results:
+			ip = raw.find_all('span', {'class': 'dns'})[0].get('id')
+			vhosts_html = raw.find_all('i', {'title': 'names on certificate'})
+			if vhosts_html:
+				l = vhosts_html[0].next_sibling.replace(' ', '')
+				for vhost in l.split(','):
+					vhosts.append(vhost)
+			self.ipv4.append({'ip': ip, 'protocols': protocols, 'vhosts': vhosts})
 		return True
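
The new Censys_WEB class is what bypasses the API limit: instead of the authenticated REST endpoint it drives the public web search. build_query_ipv4 turns a list of {'start': ..., 'end': ...} range dicts into an "ip:[start TO end]" query joined with OR, search_ipv4 requests the first results page, reads the page count from the SearchResultSectionHeader__statistic spans and walks the remaining pages, and parse_ipv4 collects one {'ip': ..., 'protocols': ..., 'vhosts': ...} dict per SearchResult div. A minimal standalone usage sketch follows; the range values are invented, and note that login() as committed only opens a plain requests session without posting the username and password, so the scraper effectively browses Censys unauthenticated.

from censys import Censys_WEB

c = Censys_WEB("dummyuser", "dummypass")
# Hypothetical RIPE-style range; in acasown.py these dicts come from ripe.Ripe().search()
ranges = [{'start': '193.0.0.0', 'end': '193.0.0.255'}]
query = c.build_query_ipv4(ranges)   # -> "ip:[193.0.0.0 TO 193.0.0.255]"
for host in c.search_ipv4(query):
    print(host['ip'], host['vhosts'])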

BIN
censys/__init__.pyc


BIN
ripe/__init__.pyc