Batch-submitting 100+ shells with Python

Author: echo

A while back I was writing scripts for batch vulnerability hunting. That work is wrapped up now, at roughly 100+ shells; the leftover SQL injection and unauthorized-access findings I used for practice all went to public-interest SRC platforms.

Screenshots first. Please go easy on me, experts; this is just a walkthrough of the process from idea to implementation.

Since everyone here is already a seasoned vulnerability hunter, let's go straight to the workflow diagram and the scripts.

1. Take FOFA assets as the example here. Say you're hunting OA shells and unauthorized access: most of these targets won't have a domain, and from a business standpoint such self-hosted systems rarely do. Cleaning the IPs and resolving them back to domains takes a few tricks, as shown in the figure (script at the end of the article).

The output lines look like `[url]:xxx [ip138]:xxx [aizhan]:xxx`. My approach is to split() on the `[ip138]:` delimiter to grab that part and everything after it, then split again to peel off the ip138 portion and end up with the lookup results, as in the sketch below.
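
A minimal sketch of that parsing, assuming the line format written by the reverse-lookup script (script 3 below); the sample line is made up:

```python
# A made-up line in the format produced by script 3:
line = "[url]:1.2.3.4:8080 [ip138]:['a.com'] [aizhan]:['a.com', 'b.com']"

# First split: drop the [url] prefix, keep the ip138 part and everything after it.
rest = line.split("[ip138]:", 1)[1]

# Second split: separate the ip138 result from the aizhan result.
ip138_part, aizhan_part = rest.split("[aizhan]:", 1)

print(ip138_part.strip())   # ['a.com']
print(aizhan_part.strip())  # ['a.com', 'b.com']
```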

2. Once you have the domains, you need to determine ownership and weight. If you plan to submit to an SRC, the report must include a screenshot proving ownership (script at the end of the article, for reference only).

​ i. Use an `re` regex to match the content inside tags in the response body; a commonly used match:

re.findall(r"""<span>(.*?)</span>""", result.text)

​ ii. Once the ownership is found, open a text file and append the lookup results.

​ iii. Common tricks for querying ownership; see the sketch after this list (the full version is script 2 at the end).
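
Here is a minimal sketch tying steps i-iii together. It is only illustrative: the qcc.com search URL and the bare `<span>` pattern are stand-ins, and the working version is script 2 at the end of the article.

```python
import re
import requests

# Stand-in ownership lookup: query qcc.com for a domain (script 2 does this properly).
result = requests.get("https://www.qcc.com/web/search?key=example.com", timeout=5)

# i. regex-match the text inside the tag of interest in the response body
owners = re.findall(r"""<span>(.*?)</span>""", result.text)

# ii./iii. append whatever was found to a text file for the report
with open("owners.txt", "a", encoding="utf-8") as f:
    for owner in owners:
        f.write(owner + "\n")
```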

3. Submitting the vulnerabilities:

​ i. Prepare a CSV (there are plenty of reasons not to use xlsx, compatibility being the main one).

​ ii. With pandas.drop() you can avoid re-walking rows of the sheet you have already handled: say you're submitting to Vulbox (盒子) or Butian (补天), you drop each record after submitting it, wait a few seconds, and move on to the next one. A sketch follows this list.
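
A minimal sketch of that bookkeeping, assuming a `vulns.csv` with `url` and `owner` columns and a hypothetical `submit()` standing in for the actual platform submission:

```python
import time
import pandas as pd

def submit(row):
    # placeholder for the real submission step (e.g. driving the SRC form)
    print("submitting:", row["url"], row["owner"])

df = pd.read_csv("vulns.csv")

for idx, row in df.iterrows():
    submit(row)
    df = df.drop(idx)                    # drop the row once it has been submitted
    df.to_csv("vulns.csv", index=False)  # persist, so a rerun resumes where it stopped
    time.sleep(5)                        # wait a few seconds between submissions
```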

4. Some discussion

​ Verifying vulnerabilities in practice is messy. SRC platforms scrutinize what you submit, both the vendor ownership and the verification screenshots; miss a screenshot here and the report simply won't pass. If the volume is small, try to collect the evidence screenshots by hand. Note that selenium screenshots do not capture the address bar, so the ownership can't be seen and the report gets rejected outright.
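
One possible workaround (my own suggestion, not part of the original workflow) is to take an OS-level screenshot with pyautogui instead of selenium's page-only capture, so the browser chrome and the URL in the address bar end up in the evidence:

```python
import time
import pyautogui                  # captures the whole screen, address bar included
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("http://example.com")  # placeholder target
time.sleep(2)                     # give the page time to render

pyautogui.screenshot("evidence.png")  # full-screen capture for the report
driver.quit()
```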

Two other handy Excel moves for cleaning the data: 1. select a column and find-and-replace "aaa" to strip every occurrence of "aaa"; 2. select a column, enable smart filtering, and enter the expression you need to keep only the matching rows. The pandas equivalent is sketched below.
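
If you'd rather script those two cleanups, here is a pandas sketch (the `url` column name and the filter expression are assumptions):

```python
import pandas as pd

df = pd.read_csv("data.csv")

# 1. strip an unwanted substring from a column (Excel: find "aaa", replace with nothing)
df["url"] = df["url"].str.replace("aaa", "", regex=False)

# 2. keep only rows matching an expression (Excel: smart filter on the column)
df = df[df["url"].str.contains(r"\.gov\.cn", na=False)]

df.to_csv("cleaned.csv", index=False)
```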

5. The scripts

1. Batch verification + saving hits to a file

```python
import re
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning

# self-signed certificates are common on these targets, so silence the TLS warnings
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

poc = "/index.php/xxxx"

def module():
    print('------------------------------')
    print('+ Based on the PeiQi wiki PoC, modified by author: echo')
    print('+ Usage: python vuln-echo.py')
    print('------------------------------')

def vuln(url):
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
    }
    try:
        target = url + poc
        req_result = requests.get(target, timeout=3, headers=headers, verify=False)
        # crude SQLi check: an error page echoing 'syntax' on a 200 response
        if ('syntax' in req_result.text) and req_result.status_code == 200:
            print("Site {} has SQL injection".format(url))
            f.write(url + "\n")  # f is the results file opened in __main__
    except:
        print("Bad luck, this one blew up:")
        print(target)

def Scan(file_name):
    with open(file_name, "r", encoding='utf8') as scan_url:
        for url in scan_url:
            url = url.strip('\n')
            if url[:4] != "http":
                url = "http://" + url
            try:
                vuln(url)
            except:
                print("Request failed")
                continue

if __name__ == '__main__':
    module()
    target_url = str(input("Enter the target list file name\n"))
    with open('./vulnsql1.txt', 'a', encoding='utf-8') as f:
        Scan(target_url)
```
2. Selenium version: batch domain-ownership lookup + screenshots

```python
import os
import re
import requests
from selenium import webdriver
from fake_useragent import UserAgent

session = requests.session()

class Poc:
    def __init__(self, url):
        self.url = url
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:93.0) Gecko/20100101 Firefox/93.0",
            "Host": "www.qcc.com",
        }
        # self.chrome = webdriver.Chrome(executable_path='chromedriver.exe')

    def qcc(self, ua):
        payload = "https://www.qcc.com/web/search?key="
        qcc_headers = {
            'Host': 'www.qcc.com',
            'User-Agent': ua.random,  # rotate the User-Agent on every request
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Accept-Encoding': 'gzip, deflate, br',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
            'Cookie': 'acw_tc=7729d2ad16456665232162240e7d9acc6bb665fcb0b634a57fa14bd0e4; QCCSESSID=d7b8f5ab4f4b6865b70c9afbf9;'}
        try:
            target = payload + self.url
            result = session.get(target, headers=qcc_headers)
            # the company name sits in a data-v-* <span> on the search results page
            result_site = re.findall(r"""<span data-v-dcbac042>(.*?)</span></a>""", result.text)
            print(result_site[0])
        except:
            print("has some wrong")

if __name__ == '__main__':
    # use a local copy of the fake_useragent database to avoid fetching it online
    location = os.getcwd() + '/fake_useragent_0.1.11.json'
    ua_header = UserAgent(path=location)
    targets = open("./domain.txt", "r")
    # data cleaning: strip whitespace and the scheme, keeping only the host
    for target in targets.readlines():
        target = target.strip()
        domain = target.split("http://")[1]  # assumes every line starts with http://
        poc = Poc(domain)
        poc.qcc(ua_header)
```

3. Batch reverse-IP lookup (aizhan + ip138)

```python
import os
import re
import time
import requests
from fake_useragent import UserAgent
from tqdm import tqdm

# ip138 reverse lookup
def ip138_chaxun(ip, ua):
    ip138_headers = {
        'Host': 'site.ip138.com',
        'User-Agent': ua.random,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://site.ip138.com/'}
    ip138_url = 'https://site.ip138.com/' + str(ip) + '/'
    try:
        ip138_res = requests.get(url=ip138_url, headers=ip138_headers, timeout=2).text
        if '<li>暂无结果</li>' not in ip138_res:  # ip138's "no results" marker
            result_site = re.findall(r"""</span><a href="/(.*?)/" target="_blank">""", ip138_res)
            return result_site
    except:
        pass

# aizhan reverse lookup
def aizhan_chaxun(ip, ua):
    aizhan_headers = {
        'Host': 'dns.aizhan.com',
        'User-Agent': ua.random,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://dns.aizhan.com/'}
    aizhan_url = 'https://dns.aizhan.com/' + str(ip) + '/'
    try:
        aizhan_r = requests.get(url=aizhan_url, headers=aizhan_headers, timeout=2).text
        aizhan_nums = re.findall(r'''<span class="red">(.*?)</span>''', aizhan_r)
        if int(aizhan_nums[0]) > 0:  # number of domains found on this IP
            aizhan_domains = re.findall(r'''rel="nofollow" target="_blank">(.*?)</a>''', aizhan_r)
            return aizhan_domains
    except:
        pass

def catch_result(i):
    ua_header = UserAgent()
    i = i.strip()
    try:
        ip = i.split(":")[0]  # input lines look like 1.2.3.4:8080
        ip138_result = ip138_chaxun(ip, ua_header)
        aizhan_result = aizhan_chaxun(ip, ua_header)
        time.sleep(1)
        if (ip138_result is not None and ip138_result != []) or aizhan_result is not None:
            with open("vulnwebshell.txt", 'a') as f:
                result = "[url]:" + i + " " + "[ip138]:" + str(ip138_result) + " [aizhan]:" + str(aizhan_result)
                print(result)
                f.write(result + "\n")
        else:
            with open("反查失败列表.txt", 'a') as f:  # list of failed lookups
                f.write(i + "\n")
    except:
        pass

if __name__ == '__main__':
    url_list = open("vuln.txt", 'r').readlines()
    # clear both output files at every start
    if os.path.exists("反查失败列表.txt"):
        open("反查失败列表.txt", 'w').truncate()
    if os.path.exists("vulnwebshell.txt"):
        open("vulnwebshell.txt", 'w').truncate()
    for i in tqdm(url_list):
        catch_result(i)
```