diff --git a/README.md b/README.md
index a567144..7cc69cd 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,7 @@
 | .___________________. |==| Web Security Scanner
 | | ................. | |  |
 | | :::GSec Running!::| |  | Author: c0d3ninja
-| | ::::::::::::::::: | |  | Version: beta-v0.19
+| | ::::::::::::::::: | |  | Version: beta-v0.20
 | | :1337 bugs found!:| |  | Instagram: gotr00t0day
 | | ::::::::::::::::: | |  |
 | | ::::::::::::::::: | |  |
diff --git a/gsec.py b/gsec.py
index 3e0875a..02f121d 100644
--- a/gsec.py
+++ b/gsec.py
@@ -22,7 +22,7 @@
 | .___________________. |==| {Fore.YELLOW}Web Security Scanner{Fore.RESET}
 | | ................. | |  |
 | | :::GSec Running!::| |  | {Fore.YELLOW}Author: {Fore.MAGENTA}c0d3ninja{Fore.RESET}
-| | ::::::::::::::::: | |  | {Fore.YELLOW}Version: {Fore.MAGENTA}beta-v0.19{Fore.RESET}
+| | ::::::::::::::::: | |  | {Fore.YELLOW}Version: {Fore.MAGENTA}beta-v0.20{Fore.RESET}
 | | :1337 bugs found!:| |  | {Fore.YELLOW}Instagram: {Fore.MAGENTA}gotr00t0day{Fore.RESET}
 | | ::::::::::::::::: | |  |
 | | ::::::::::::::::: | |  |
@@ -112,6 +112,7 @@ async def main():
     hostheader_injection.host_header_injection(args.target)
     head_vuln.head_auth_bypass(args.target)
     path_traversal.path_traversal_scan(args.target)
+    crawler.scan(args.target)
     await loginscanner.main(args.target)
     print("\n")
     print(f"\t\t {Fore.MAGENTA} SCAN FINISHED{Fore.LIGHTMAGENTA_EX}!{Fore.MAGENTA}!{Fore.YELLOW}!{Fore.RESET}")
diff --git a/utils/crawler.py b/utils/crawler.py
new file mode 100644
index 0000000..f83db60
--- /dev/null
+++ b/utils/crawler.py
@@ -0,0 +1,24 @@
+from urllib.parse import urljoin
+import requests
+import urllib3
+import re
+import os
+
+# Suppress the InsecureRequestWarning that verify=False would otherwise emit.
+urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+user_agent_ = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"
+header = {"User-Agent": user_agent_}
+
+
+def scan(url: str) -> None:
+    # Fetch the target page and pull out every href attribute.
+    s = requests.Session()
+    r = s.get(url, verify=False, headers=header, timeout=10)
+    links = re.findall('(?:href=")(.*?)"', r.text)
+    # Resolve relative links against the base URL, one per line.
+    link_list = [urljoin(url, link) + "\n" for link in links]
+    # Record the discovered links for the rest of the scanner to use.
+    os.makedirs("output", exist_ok=True)
+    with open("output/spider.txt", "w") as f:
+        f.writelines(link_list)
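
For context, a minimal usage sketch of the new crawler module. The target URL is illustrative and not part of the patch, and the import path assumes the utils package layout shown above; scan() writes its results to output/spider.txt rather than returning them:

    from utils import crawler

    # Crawl the target; every discovered link is written to output/spider.txt.
    crawler.scan("https://example.com")

    # Read back the links the crawler recorded.
    with open("output/spider.txt") as f:
        print(f.read())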