diff --git a/README.md b/README.md index fc11bd0..5a5be61 100644 --- a/README.md +++ b/README.md @@ -27,9 +27,11 @@ Simple import any of the 3 packages and then add define the variables accordingl from GVA.scanner import NetworkScanner from GVA.dns_recon import DNSRecon from GVA.geo import geo_ip_recon +from GVA.jwt import JWTAnalyzer from GVA.menus import Menus from GVA.ai_models import NMAP_AI_MODEL from GVA.ai_models import DNS_AI_MODEL +from GVA.ai_models import JWT_AI_MODEL from GVA.assets import Assets from GVA.subdomain import sub_enum from GVA import gui @@ -40,6 +42,7 @@ geo_ip = geo_ip_recon() p_ai_models = NMAP_AI_MODEL() dns_ai_models = DNS_AI_MODEL() port_scanner = NetworkScanner() +jwt_analizer = JWTAnalyzer() sub_recon = sub_enum() asset_codes = Assets() @@ -48,7 +51,7 @@ asset_codes = Assets() lkey = "LLAMA API KEY" lendpoint = "LLAMA ENDPOINT" keyset = "AI API KEY" -target_ip_hostname = "TARGET IP OR HOSTNAME" +target_ip_hostname_or_token = "TARGET IP, HOSTNAME OR TOKEN" profile_num = "PROFILE FOR NMAP SCAN" ai_set = "AI OF CHOICE" akey_set = "OPENAI API KEY" @@ -106,14 +109,14 @@ python gpt_vuln.py --help python gpt_vuln.py --r help # Specify target with the attack -python gpt_vuln.py --target --attack dns/nmap +python gpt_vuln.py --target --attack dns/nmap/jwt # Specify target and profile for nmap -python gpt_vuln.py --target --attack nmap --profile <1-13> +python gpt_vuln.py --target --attack nmap --profile <1-13> (Default:1) # Specify target for DNS no profile needed -python gpt_vuln.py --target --attack dns +python gpt_vuln.py --target --attack dns # Specify target for Subdomain Enumeration no profile used default list file python gpt_vuln.py --target --attack sub @@ -130,6 +133,9 @@ python gpt_vuln.py --target --attack nmap --profile <1-5> --ai llama /llama # Specify the AI to be used for dns python gpt_vuln.py --target --attack dns --ai llama /llama-api /bard / openai +# Specify the AI to be used for JWT analysis +python gpt_vuln.py --target --attack jwt --ai llama /llama-api /bard / openai + # Interactive step by step cli interface python gpt_vuln.py --menu True ``` @@ -154,6 +160,7 @@ python gpt_vuln.py --menu True │ 2 │ DNS Enum │ │ 3 │ Subdomain Enum │ │ 4 │ GEO-IP Enum │ +| 5 | JWT Analysis | │ q │ Quit │ └─────────┴────────────────┘ Enter your choice: @@ -424,6 +431,22 @@ Using the instruction set and the data provided via the prompt the llama AI gene For the most usage I suggest you create a runpod serverless endpoint deployment of llama you can refer to this tutorial for that [tutorial](https://www.youtube.com/watch?v=Ftb4vbGUr7U). Follow the tutorial for better use. 
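Reviewer note: before the output samples, here is a minimal, hypothetical sketch of driving the new JWT path through the package API. It is not part of the change itself: it assumes the package-level `JWTAnalyzer.analyze` keeps the signature added in `commands/jwt.py` / `package/GVA/jwt.py` below, rebuilds the token from the sample report in the next section (empty signature segment, as in that report), and uses `"OPENAI API KEY"` as a placeholder; a real key and network access are needed for the model call to return anything.

```python
from GVA.jwt import JWTAnalyzer
from GVA.ai_models import JWT_AI_MODEL

jwt_analyzer = JWTAnalyzer()
jwt_ai_model = JWT_AI_MODEL()

# Hypothetical token assembled from the sample JWT report shown below;
# the third (signature) segment is intentionally left empty.
token = (
    "eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
    ".eyJzdWIiOiAiMTIzNDU2Nzg5MCIsICJuYW1lIjogIkpvaG4gRG9lIiwgImlhdCI6IDE1MTYyMzkwMjJ9"
    "."
)

report = jwt_analyzer.analyze(
    AIModels=jwt_ai_model,
    token=token,
    openai_api_token="OPENAI API KEY",  # placeholder key
    bard_api_token=None,
    llama_api_token=None,
    llama_endpoint=None,
    AI="openai",
)
print(report)
```

The returned string can then be rendered the same way the CLI and menu do it, via `Assets().print_output("JWT", report, "openai")`.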
### Output +#### JWT Output: + +``` + GVA Report for JWT +┏━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ +┃ Variables ┃ Results ┃ +┡━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ +│ Algorithm Used │ HS256 │ +│ Header │ eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9 │ +│ Payload │ eyJzdWIiOiAiMTIzNDU2Nzg5MCIsICJuYW1lIjogIkpvaG4gRG9lIiwgImlhdCI6IDE1MTYyMzkwMjJ9 │ +│ Signature │ │ +│ PossibleAttacks │ None identified │ +│ VulnerableEndpoints │ Unable to determine without additional information │ +└─────────────────────┴──────────────────────────────────────────────────────────────────────────────────┘ +``` + #### Nmap output: ##### OpenAI and Bard: diff --git a/commands/__pycache__/assets.cpython-311.pyc b/commands/__pycache__/assets.cpython-311.pyc index b5bae0b..ceba935 100644 Binary files a/commands/__pycache__/assets.cpython-311.pyc and b/commands/__pycache__/assets.cpython-311.pyc differ diff --git a/commands/__pycache__/jwt.cpython-311.pyc b/commands/__pycache__/jwt.cpython-311.pyc new file mode 100644 index 0000000..604683e Binary files /dev/null and b/commands/__pycache__/jwt.cpython-311.pyc differ diff --git a/commands/__pycache__/menus.cpython-311.pyc b/commands/__pycache__/menus.cpython-311.pyc index bf842ae..0caa176 100644 Binary files a/commands/__pycache__/menus.cpython-311.pyc and b/commands/__pycache__/menus.cpython-311.pyc differ diff --git a/commands/__pycache__/models.cpython-311.pyc b/commands/__pycache__/models.cpython-311.pyc index fb02581..8ab6398 100644 Binary files a/commands/__pycache__/models.cpython-311.pyc and b/commands/__pycache__/models.cpython-311.pyc differ diff --git a/commands/__pycache__/port_scanner.cpython-311.pyc b/commands/__pycache__/port_scanner.cpython-311.pyc index bad2eb5..fdb39f5 100644 Binary files a/commands/__pycache__/port_scanner.cpython-311.pyc and b/commands/__pycache__/port_scanner.cpython-311.pyc differ diff --git a/commands/assets.py b/commands/assets.py index 0ac891f..fd047a0 100644 --- a/commands/assets.py +++ b/commands/assets.py @@ -75,6 +75,7 @@ def help_menu() -> None: console.print(table) def print_output(self, attack_type: str, jdata: str, ai: str) -> Any: + jdata = str(jdata) match attack_type: case "Nmap": match ai: @@ -122,6 +123,52 @@ def print_output(self, attack_type: str, jdata: str, ai: str) -> Any: border_style="blue", ) print(message_panel) + case "JWT": + match ai: + case 'openai': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(str(key), str(value)) + print(table) + case 'bard': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(str(key), str(value)) + print(table) + case 'llama': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 'llama-api': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", 
+ ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) case "DNS": match ai: case 'openai': diff --git a/commands/jwt.py b/commands/jwt.py new file mode 100644 index 0000000..dda89fb --- /dev/null +++ b/commands/jwt.py @@ -0,0 +1,71 @@ +import jwt +import json +import base64 +from datetime import datetime +from typing import Optional + + +class JWTAnalyzer: + + def analyze(self, AIModels, token, openai_api_token: Optional[str], bard_api_token: Optional[str], llama_api_token: Optional[str], llama_endpoint: Optional[str], AI: str) -> str: + try: + self.algorithm_used = "" + self.decoded_payload = "" + self.expiration_time = "" + parts = token.split('.') + if len(parts) != 3: + raise ValueError("Invalid token format. Expected 3 parts.") + + header = json.loads(base64.urlsafe_b64decode(parts[0] + '===').decode('utf-8', 'replace')) + self.algorithm_used = header.get('alg', 'Unknown Algorithm') + payload = json.loads(base64.urlsafe_b64decode(parts[1] + '===').decode('utf-8', 'replace')) + self.decoded_payload = payload + self.claims = {key: value for key, value in payload.items()} + if 'exp' in payload: + self.expiration_time = datetime.utcfromtimestamp(payload['exp']) + self.analysis_result = { + 'Algorithm Used': self.algorithm_used, + 'Decoded Payload': self.decoded_payload, + 'Claims': self.claims, + 'Expiration Time': self.expiration_time + } + str_data = str(self.analysis_result) + match AI: + case 'openai': + try: + if openai_api_token is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.gpt_ai(str_data, openai_api_token) + except KeyboardInterrupt: + print("Bye") + quit() + case 'bard': + try: + if bard_api_token is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.BardAI(bard_api_token, str_data) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama': + try: + response = AIModels.llama_AI(str_data, "local", llama_api_token, llama_endpoint) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama-api': + try: + response = AIModels.Llama_AI(str_data, "runpod", llama_api_token, llama_endpoint) + except KeyboardInterrupt: + print("Bye") + quit() + final_data = str(response) + return final_data + except jwt.ExpiredSignatureError: + self.analysis_result = {'Error': 'Token has expired.'} + except jwt.InvalidTokenError as e: + self.analysis_result = {'Error': f'Invalid token: {e}'} diff --git a/commands/menus.py b/commands/menus.py index 811d002..afdaefd 100644 --- a/commands/menus.py +++ b/commands/menus.py @@ -1,26 +1,26 @@ -import json import os import platform -from typing import Any from rich import print from rich.console import Console from rich.table import Table from rich.panel import Panel -from rich.console import Group -from rich.align import Align -from rich import box -from rich.markdown import Markdown from commands.dns_recon import DNSRecon from commands.geo import geo_ip_recon from commands.port_scanner import NetworkScanner +from commands.jwt import JWTAnalyzer from commands.models import NMAP_AI_MODEL from commands.models import DNS_AI_MODEL +from commands.models import JWT_AI_MODEL from commands.subdomain import sub_enum +from commands.assets import Assets +assets = Assets() dns_enum = DNSRecon() geo_ip = geo_ip_recon() +jwt_analyzer = JWTAnalyzer() p_ai_models = NMAP_AI_MODEL() dns_ai_models = DNS_AI_MODEL() +jwt_ai_model = JWT_AI_MODEL() port_scanner = NetworkScanner() sub_recon = 
sub_enum() console = Console() @@ -57,125 +57,6 @@ def clearscr() -> None: class Menus(): - def flatten_json(self, data: Any, separator: Any = '.') -> Any: - flattened_data = {} - for key, value in data.items(): - if isinstance(value, dict): - nested_data = self.flatten_json(value, separator) - for nested_key, nested_value in nested_data.items(): - flattened_data[key + separator + nested_key] = nested_value - else: - flattened_data[key] = value - return flattened_data - - def print_output(self, attack_type: str, jdata: str, ai: str) -> Any: - match attack_type: - case "Nmap": - match ai: - case 'openai': - data = json.loads(jdata) - table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") - table.add_column("Variables", style="cyan") - table.add_column("Results", style="green") - - for key, value in data.items(): - table.add_row(key, value) - print(table) - case 'bard': - data = json.loads(jdata) - table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") - table.add_column("Variables", style="cyan") - table.add_column("Results", style="green") - - for key, value in data.items(): - table.add_row(key, value) - print(table) - case 'llama': - ai_out = Markdown(jdata) - message_panel = Panel( - Align.center( - Group("\n", Align.center(ai_out)), - vertical="middle", - ), - box=box.ROUNDED, - padding=(1, 2), - title="[b red]The GVA LLama2", - border_style="blue", - ) - print(message_panel) - case 'llama-api': - ai_out = Markdown(jdata) - message_panel = Panel( - Align.center( - Group("\n", Align.center(ai_out)), - vertical="middle", - ), - box=box.ROUNDED, - padding=(1, 2), - title="[b red]The GVA LLama2", - border_style="blue", - ) - print(message_panel) - case "DNS": - match ai: - case 'openai': - data = json.loads(jdata) - table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") - table.add_column("Variables", style="cyan") - table.add_column("Results", style="green") - - for key, value in data.items(): - table.add_row(key, value) - print(table) - case 'bard': - data = json.loads(jdata) - table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") - table.add_column("Variables", style="cyan") - table.add_column("Results", style="green") - - for key, value in data.items(): - table.add_row(key, value) - print(table) - case 'llama': - ai_out = Markdown(jdata) - message_panel = Panel( - Align.center( - Group("\n", Align.center(ai_out)), - vertical="middle", - ), - box=box.ROUNDED, - padding=(1, 2), - title="[b red]The GVA LLama2", - border_style="blue", - ) - print(message_panel) - case 'llama-api': - ai_out = Markdown(jdata) - message_panel = Panel( - Align.center( - Group("\n", Align.center(ai_out)), - vertical="middle", - ), - box=box.ROUNDED, - padding=(1, 2), - title="[b red]The GVA LLama2", - border_style="blue", - ) - print(message_panel) - case "GeoIP": - data = json.loads(jdata) - table = Table(title="GVA Report for GeoIP", show_header=True, header_style="bold magenta") - table.add_column("Identifiers", style="cyan") - table.add_column("Data", style="green") - - flattened_data: dict = self.flatten_json(data, separator='.') - - for key, value in flattened_data.items(): - value_str = str(value) - table.add_row(key, value_str) - - console = Console() - console.print(table) def nmap_menu(self) -> None: try: @@ -286,7 +167,7 @@ def nmap_menu(self) -> None: lendpoint=self.lendpoint, AI=self.ai_set ) - self.print_output("Nmap", 
pout, self.ai_set) + assets.print_output("Nmap", pout, self.ai_set) case "r": clearscr() self.menu_term() @@ -377,7 +258,98 @@ def dns_menu(self) -> None: lendpoint=self.lendpoint, AI=self.ai_set ) - self.print_output("DNS", dns_output, self.ai_set) + assets.print_output("DNS", dns_output, self.ai_set) + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + + def jwt_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "AI Option") + table.add_row("2", "Set Token") + table.add_row("3", "Show options") + table.add_row("4", "Run Attack") + table.add_row("r", "Return") + console.print(table) + option = input("Enter your choice: ") + match option: + case "1": + clearscr() + table0 = Table() + table0.add_column("Options", style="cyan") + table0.add_column("AI Available", style="green") + table0.add_row("1", "OpenAI") + table0.add_row("2", "Bard") + table0.add_row("3", "LLama2") + print(Panel(table0)) + self.ai_set_choice = input("Enter AI of Choice: ") + match self.ai_set_choice: + case "1": + self.ai_set_args, self.ai_set = "openai", "openai" + self.akey_set = input("Enter OpenAI API: ") + print(Panel(f"API-Key Set: {self.akey_set}")) + case "2": + self.ai_set_args, self.ai_set = "bard", "bard" + self.bkey_set = input("Enter Bard AI API: ") + print(Panel(f"API-Key Set: {self.bkey_set}")) + case "3": + clearscr() + tablel = Table() + tablel.add_column("Options", style="cyan") + tablel.add_column("Llama Options", style="cyan") + tablel.add_row("1", "Llama Local") + tablel.add_row("2", "Llama RunPod") + print(tablel) + self.ai_set_choice = input("Enter AI of Choice: ") + self.ai_set_args = "llama" + self.ai_set = "llama" + if self.ai_set_choice == "1": + self.ai_set = "llama" + print(Panel("No Key needed")) + print(Panel("Selected LLama")) + elif self.ai_set_choice == "2": + self.ai_set = "llama-api" + self.llamaendpoint = input("Enter Runpod Endpoint ID: ") + self.llamakey = input("Enter Runpod API Key: ") + print(Panel(f"API-Key Set: {self.llamakey}")) + print(Panel(f"Runpod Endpoint Set: {self.llamaendpoint}")) + self.jwt_menu() + case "2": + clearscr() + print(Panel("Set Token value")) + self.t = input("Enter TOKEN: ") + print(Panel(f"Token Set:{self.t}")) + self.jwt_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", style="cyan") + table1.add_column("Value", style="green") + table1.add_row("AI Set", str(self.ai_set_args)) + table1.add_row("OpenAI API Key", str(self.akey_set)) + table1.add_row("Bard AI API Key", str(self.bkey_set)) + table1.add_row("Llama Runpod API Key", str(self.llamakey)) + table1.add_row("Runpod Endpoint ID", str(self.llamaendpoint)) + table1.add_row("JWT TOKEN", str(self.t)) + print(Panel(table1)) + self.jwt_menu() + case "4": + clearscr() + JWT_output: str = jwt_analyzer.analyze( + AIModels=jwt_ai_model, + token=self.t, + openai_api_token=self.akey_set, + bard_api_token=self.bkey_set, + llama_api_token=self.lkey, + llama_endpoint=self.lendpoint, + AI=self.ai_set + ) + assets.print_output("JWT", JWT_output, self.ai_set) case "r": clearscr() self.menu_term() @@ -420,7 +392,7 @@ def geo_menu(self) -> None: case "4": clearscr() geo_output: str = geo_ip.geoip(self.keyset, self.t) - self.print_output("GeoIP", str(geo_output), ai="None") + assets.print_output("GeoIP", str(geo_output), ai="None") case "r": clearscr() self.menu_term() @@ -491,6 +463,7 @@ def __init__(self, lkey, lendpoint, keyset, 
t, profile_num, ai_set, akey_set, bk table.add_row("2", "DNS Enum") table.add_row("3", "Subdomain Enum") table.add_row("4", "GEO-IP Enum") + table.add_row("5", "JWT Analysis") table.add_row("q", "Quit") console.print(table) option = input("Enter your choice: ") @@ -507,6 +480,9 @@ def __init__(self, lkey, lendpoint, keyset, t, profile_num, ai_set, akey_set, bk case "4": clearscr() self.geo_menu() + case "5": + clearscr() + self.jwt_menu() case "q": quit() except KeyboardInterrupt: diff --git a/commands/models.py b/commands/models.py index 247b7ea..9148111 100644 --- a/commands/models.py +++ b/commands/models.py @@ -282,6 +282,138 @@ def GPT_AI(key: str, data: Any) -> str: quit() +class JWT_AI_MODEL(): + @staticmethod + def BardAI(key: str, jwt_data: Any) -> str: + prompt = f""" + Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. + Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities + 5) For the output "Algorithm Used" value use the Algorithm value from the JWT data. + 6) For the output "Header" value use the Header value from the JWT data. + 7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints. + 8) For "Signature" mention the signatures discovered. + 9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints" + + The output format: + {{ + "Algorithm Used": "", + "Header": "", + "Payload": "", + "Signature": "", + "PossibleAttacks": "", + "VulnerableEndpoints": "" + }} + + JWT Token Data to be analyzed: {jwt_data} + """ + + url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key + + headers = { + "Content-Type": "application/json" + } + + data = { + "prompt": { + "text": prompt + } + } + + response = requests.post(url, json=data, headers=headers) + + if response.status_code == 200: + generated_text = response.json() + jwt_analysis_data = jwt_ai_data_regex(str(generated_text)) + print(jwt_analysis_data) + return jwt_analysis_data + else: + print("Error: Unable to generate text. Status Code:", response.status_code) + return "None" + + @staticmethod + def llama_AI(self, jwt_data: str, mode: str, lkey, lendpoint): + api_url = 'http://localhost:5000/api/chatbot' + + user_instruction = """ + Perform a comprehensive analysis on the provided JWT token. The JWT analysis output must be in a asked format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. 
+ Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks + + The output format: + "Header": + - List the JWT header details and security views on them + "Payload": + - List the JWT payload details and security views on them + "Signature": + - Provide insights on the JWT signature + "PossibleAttacks": + - List possible JWT exploits and attacks + """ + user_message = f""" + JWT Token Data to be analyzed: {jwt_data} + """ + + model_name = "TheBloke/Llama-2-7B-Chat-GGML" + file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin" + if mode == "local": + bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + elif mode == "runpod": + prompt = f"[INST] <> {user_instruction}<> JWT Token Data to be analyzed: {user_message} [/INST]" + bot_response = self.llama_runpod_api(prompt, lkey, lendpoint) + bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + print("test") + if bot_response: + return bot_response + + @staticmethod + def gpt_ai(analyze: str, api_key: Optional[str]) -> str: + openai.api_key = api_key + prompt = f""" + Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. + Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities + 5) For the output "Algorithm Used" value use the Algorithm value from the JWT data. + 6) For the output "Header" value use the Header value from the JWT data. + 7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints. + 8) For "Signature" mention the signatures discovered. 
+ 9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints" + + The output format: + {{ + "Algorithm Used": "", + "Header": "", + "Payload": "", + "Signature": "", + "PossibleAttacks": "", + "VulnerableEndpoints": "" + }} + + JWT Token Data to be analyzed: {analyze} + """ + try: + messages = [{"content": prompt, "role": "user"}] + response = openai.ChatCompletion.create( + model=model_engine, + messages=messages, + max_tokens=1024, + n=1, + stop=None, + ) + response = response['choices'][0]['message']['content'] + rsp = str(response) + return rsp + except KeyboardInterrupt: + print("Bye") + quit() + + def chat_with_api(api_url: str, user_message: str, user_instruction: str, model_name: str, file_name: str = None) -> Any: # Prepare the request data in JSON format data = { @@ -440,3 +572,61 @@ def nmap_ai_data_regex(json_string: str) -> Any: json_output = json.dumps(data) return json_output + + +def jwt_ai_data_regex(json_string: str) -> Any: + # Define the regular expression patterns for individual values + header_pattern = r'"Header": \{\s*"alg": "(.*?)",\s*"typ": "(.*?)"\s*\}' + payload_pattern = r'"Payload": \{\s*"iss": "(.*?)",\s*"sub": "(.*?)",\s*"aud": "(.*?)",\s*"exp": "(.*?)",\s*"nbf": "(.*?)",\s*"iat": "(.*?)"\s*\}' + signature_pattern = r'"Signature": "(.*?)"' + possible_attacks_pattern = r'"PossibleAttacks": "(.*?)"' + vulnerable_endpoints_pattern = r'"VulnerableEndpoints": "(.*?)"' + + # Initialize variables for extracted data + header = {} + payload = {} + signature = "" + possible_attacks = "" + vulnerable_endpoints = "" + + # Extract individual values using patterns + match_header = re.search(header_pattern, json_string) + if match_header: + header = {"alg": match_header.group(1), "typ": match_header.group(2)} + + match_payload = re.search(payload_pattern, json_string) + if match_payload: + payload = { + "iss": match_payload.group(1), + "sub": match_payload.group(2), + "aud": match_payload.group(3), + "exp": match_payload.group(4), + "nbf": match_payload.group(5), + "iat": match_payload.group(6) + } + + match_signature = re.search(signature_pattern, json_string) + if match_signature: + signature = match_signature.group(1) + + match_attacks = re.search(possible_attacks_pattern, json_string) + if match_attacks: + possible_attacks = match_attacks.group(1) + + match_endpoints = re.search(vulnerable_endpoints_pattern, json_string) + if match_endpoints: + vulnerable_endpoints = match_endpoints.group(1) + + # Create a dictionary to store the extracted data + data = { + "Header": header, + "Payload": payload, + "Signature": signature, + "PossibleAttacks": possible_attacks, + "VulnerableEndpoints": vulnerable_endpoints + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output diff --git a/gpt_vuln.py b/gpt_vuln.py index ac4075f..5d9f8f8 100644 --- a/gpt_vuln.py +++ b/gpt_vuln.py @@ -9,8 +9,10 @@ from commands.dns_recon import DNSRecon from commands.geo import geo_ip_recon from commands.port_scanner import NetworkScanner +from commands.jwt import JWTAnalyzer from commands.models import NMAP_AI_MODEL from commands.models import DNS_AI_MODEL +from commands.models import JWT_AI_MODEL from commands.subdomain import sub_enum from commands.menus import Menus from commands.assets import Assets @@ -20,7 +22,9 @@ geo_ip = geo_ip_recon() p_ai_models = NMAP_AI_MODEL() dns_ai_models = DNS_AI_MODEL() +jwt_ai_models = JWT_AI_MODEL() port_scanner = NetworkScanner() +jwt_analizer = JWTAnalyzer() sub_recon = sub_enum() asset_codes = Assets() 
load_dotenv() @@ -35,14 +39,15 @@ parser = argparse.ArgumentParser( description='Python-Nmap and chatGPT intigrated Vulnerability scanner') parser.add_argument('--target', metavar='target', type=str, - help='Target IP or hostname') + help='Target IP or hostname or JWT token') parser.add_argument('--profile', metavar='profile', type=int, default=1, - help='Enter Profile of scan 1-5 (Default: 1)', required=False) + help='Enter Profile of scan 1-13 (Default: 1)', required=False) parser.add_argument('--attack', metavar='attack', type=str, help=''' Enter Attack type nmap, dns or sub. sub - Subdomain Enumeration using the default array. dns - to perform DNS Enumeration and get openion from Chat-GPT + jwt - Analyze JWT tokens and the related information ''', required=False) parser.add_argument('--list', metavar='list', type=str, help=''' @@ -138,6 +143,17 @@ def main(target: Any) -> None: case 'sub': sub_output: str = sub_recon.sub_enumerator(target, list_loc) console.print(sub_output, style="bold underline") + case 'jwt': + output: str = jwt_analizer.analyze( + AIModels=jwt_ai_models, + token=target, + openai_api_token=akey, + bard_api_token=bkey, + llama_api_token=lkey, + llama_endpoint=lendpoint, + AI=ai + ) + asset_codes.print_output("JWT", output, ai) except KeyboardInterrupt: console.print_exception("Bye") quit() diff --git a/package/GVA.egg-info/PKG-INFO b/package/GVA.egg-info/PKG-INFO new file mode 100644 index 0000000..deeca84 --- /dev/null +++ b/package/GVA.egg-info/PKG-INFO @@ -0,0 +1,257 @@ +Metadata-Version: 2.1 +Name: GVA +Version: 1.1.8 +Summary: Python Project for GPT-Vuln_analyzer +Home-page: https://github.com/morpheuslord/GVA_package +Author: Chiranjeevi G +Author-email: morpheuslord@protonmail.com +Keywords: python,GPT,vulnerability,ai,vulnerability-assessment,network-scanning +Classifier: Development Status :: 1 - Planning +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python :: 3 +Classifier: Operating System :: Unix +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Requires-Python: >=3.10 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: aiohttp==3.8.4 +Requires-Dist: aiosignal==1.3.1 +Requires-Dist: async-timeout==4.0.2 +Requires-Dist: attrs==22.2.0 +Requires-Dist: certifi==2022.12.7 +Requires-Dist: charset-normalizer==3.0.1 +Requires-Dist: frozenlist==1.3.3 +Requires-Dist: idna==3.4 +Requires-Dist: multidict==6.0.4 +Requires-Dist: openai==0.27.0 +Requires-Dist: python-nmap==0.7.1 +Requires-Dist: requests==2.28.2 +Requires-Dist: tqdm==4.65.0 +Requires-Dist: urllib3==1.26.14 +Requires-Dist: yarl==1.8.2 +Requires-Dist: dnspython +Requires-Dist: rich +Requires-Dist: cowsay +Requires-Dist: tk +Requires-Dist: customtkinter + + +# GPT_Vuln-analyzer + +This is a Proof Of Concept application that demostrates how AI can be used to generate accurate results for vulnerability analysis and also allows further utilization of the already super useful ChatGPT made using openai-api, python-nmap, dnsresolver python modules and also use customtkinter and tkinter for the GUI version of the code. This project also has a CLI and a GUI interface, It is capable of doing network vulnerability analysis, DNS enumeration and also subdomain enumeration. 
+ +## Requirements +- Python 3.10 +- All the packages mentioned in the requirements.txt file +- OpenAi api + +## Usage Package + +### Import packages +`pip install GVA` +or +`pip3 install GVA` + +Simple import any of the 3 packages and then add define the variables accordingly +```python +from GVA import profile +from GVA import dns +from GVA import subdomain + +key = "__API__KEY__" +profile.openai.api_key = key +dns.openai.api_key = key + +print(profile.p1("")) +print(dns.dnsr("")) +subdomain.sub("") +``` + +## Usage CLI + +- First Change the "__API__KEY__" part of the code with OpenAI api key +```python +akey = "__API__KEY__" # Enter your API key +``` +- second install the packages +```bash +pip3 install -r requirements.txt +or +pip install -r requirements.txt +``` +- run the code python3 gpt_vuln.py +```bash +# Regular Help Menu +python gpt_vuln.py --help + +# Rich Help Menu +python get_vuln.py --r help + +# Specify target with the attack +python gpt_vuln.py --target --attack dns/nmap + +# Specify target and profile for nmap +python get_vuln.py --target --attack nmap --profile <1-5> +(Default:1) + +# Specify target for DNS no profile needed +python get_vuln.py --target --attack dns + +# Specify target for Subdomain Enumeration no profile needed +python get_vuln.py --target --attack sub +``` + +Supported in both windows and linux + +## Understanding the code + +Profiles: + +| Parameter | Return data | Description | Nmap Command | +| :-------- | :------- | :-------------------------------- | :---------| +| `p1` | `json` | Effective Scan | `-Pn -sV -T4 -O -F`| +| `p2` | `json` | Simple Scan | `-Pn -T4 -A -v`| +| `p3` | `json` | Low Power Scan | `-Pn -sS -sU -T4 -A -v`| +| `p4` | `json` | Partial Intense Scan | `-Pn -p- -T4 -A -v`| +| `p5` | `json` | Complete Intense Scan | `-Pn -sS -sU -T4 -A -PE -PP -PS80,443 -PA3389 -PU40125 -PY -g 53 --script=vuln`| + +The profile is the type of scan that will be executed by the nmap subprocess. The Ip or target will be provided via argparse. At first the custom nmap scan is run which has all the curcial arguments for the scan to continue. nextly the scan data is extracted from the huge pile of data which has been driven by nmap. the "scan" object has a list of sub data under "tcp" each labled according to the ports opened. once the data is extracted the data is sent to openai API davenci model via a prompt. the prompt specifically asks for an JSON output and the data also to be used in a certain manner. + +The entire structure of request that has to be sent to the openai API is designed in the completion section of the Program. 
+```python +def profile(ip): + nm.scan('{}'.format(ip), arguments='-Pn -sS -sU -T4 -A -PE -PP -PS80,443 -PA3389 -PU40125 -PY -g 53 --script=vuln') + json_data = nm.analyse_nmap_xml_scan() + analize = json_data["scan"] + # Prompt about what the quary is all about + prompt = "do a vulnerability analysis of {} and return a vulnerabilty report in json".format(analize) + # A structure for the request + completion = openai.Completion.create( + engine=model_engine, + prompt=prompt, + max_tokens=1024, + n=1, + stop=None, + ) + response = completion.choices[0].text + return response +``` +### Output +nmap output: +```json +{ + "Vulnerability Report": { + "Target IP": "127.0.0.1", + "OS Detected": { + "Name": "Microsoft Windows 10 1607", + "Accuracy": "100", + "CPE": [ + "cpe:/o:microsoft:windows_10:1607" + ] + }, + "Open Ports": { + "Port 135": { + "State": "open", + "Reason": "syn-ack", + "Name": "msrpc", + "Product": "Microsoft Windows RPC", + "Version": "", + "Extra Info": "", + "Conf": "10", + "CPE": "cpe:/o:microsoft:windows" + }, + "Port 445": { + "State": "open", + "Reason": "syn-ack", + "Name": "microsoft-ds", + "Product": "", + "Version": "", + "Extra Info": "", + "Conf": "3", + "CPE": "" + } + }, + "Vulnerabilities": { + "Port 135": [], + "Port 445": [] + } + } +} +``` +DNS Output: +target is google.com +```json + +{ + "A" : { + "ip": "142.250.195.174", + }, + "AAAA": { + "ip": "2404:6800:4007:826::200e" + }, + "NS": { + "nameservers": [ + "ns2.google.com.", + "ns1.google.com.", + "ns3.google.com.", + "ns4.google.com." + ] + }, + "MX" : { + "smtp": "10 smtp.google.com." + }, + "SOA" : { + "nameserver": "ns1.google.com.", + "admin": "dns-admin.google.com.", + "serial": "519979037", + "refresh": "900", + "retry": "900", + "expire": "1800", + "ttl": "60" + }, + "TXT": { + "onetrust-domain-verification": "de01ed21f2fa4d8781cbc3ffb89cf4ef", + "webexdomainverification.8YX6G": "6e6922db-e3e6-4a36-904e-a805c28087fa", + "globalsign-smime-dv": "CDYX+XFHUw2wml6/Gb8+59BsH31KzUr6c1l2BPvqKX8=", + "google-site-verification": [ + "wD8N7i1JTNTkezJ49swvWW48f8_9xveREV4oB-0Hf5o", + "TV9-DBe4R80X4v0M4U_bd_J9cpOJM0nikft0jAgjmsQ" + ], + "docusign": [ + "05958488-4752-4ef2-95eb-aa7ba8a3bd0e", + "1b0a6754-49b1-4db5-8540-d2c12664b289" + ], + "atlassian-domain-verification": "5YjTmWmjI92ewqkx2oXmBaD60Td9zWon9r6eakvHX6B77zzkFQto8PQ9QsKnbf4I", + "v=spf1 include:_spf.google.com ~all": "v=spf1 include:_spf.google.com ~all", + "facebook-domain-verification": "22rm551cu4k0ab0bxsw536tlds4h95", + "MS=E4A68B9AB2BB9670BCE15412F62916164C0B20BB": "MS=E4A68B9AB2BB9670BCE15412F62916164C0B20BB", + "apple-domain-verification": "30afIBcvSuDV2PLX" + } +} +``` + +# Usage GUI +The GUI uses customtkinter for the running of the code. The interface is straight forward the only thing required to remember is: +- When using dns attack dont specify the profile + +```bash +python GVA_gui.py +``` + +### main window +![main](https://user-images.githubusercontent.com/70637311/228863455-993e0a21-c06c-44c7-87e6-68d758a78e2c.jpeg) + +### output_DNS +![dns_output](https://user-images.githubusercontent.com/70637311/228863540-553f8560-fdf5-48f7-96e8-1f831ab3a8f2.png) + +### output_nmap +![nmap_output](https://user-images.githubusercontent.com/70637311/228863611-5d8380f0-28d5-4925-9ad3-62cd28a1ecd4.png) + +## Advantage + +- Can be used in developing a more advanced systems completly made of the API and scanner combination +- Has the capability to analize DNS information and reslove Mustiple records it a more better format. 
+- Can increase the effectiveness of the final system +- Can also perform subdomain enumeration +- Highly productive when working with models such as GPT3 diff --git a/package/GVA.egg-info/SOURCES.txt b/package/GVA.egg-info/SOURCES.txt new file mode 100644 index 0000000..62c9105 --- /dev/null +++ b/package/GVA.egg-info/SOURCES.txt @@ -0,0 +1,22 @@ +LICENSE +MANIFEST.in +README.md +pyproject.toml +setup.cfg +setup.py +GVA/__init__.py +GVA/ai_models.py +GVA/assets.py +GVA/dns_recon.py +GVA/geo.py +GVA/gui.py +GVA/jwt.py +GVA/menus.py +GVA/requirements.txt +GVA/scanner.py +GVA/subdomain.py +GVA.egg-info/PKG-INFO +GVA.egg-info/SOURCES.txt +GVA.egg-info/dependency_links.txt +GVA.egg-info/requires.txt +GVA.egg-info/top_level.txt \ No newline at end of file diff --git a/package/GVA.egg-info/dependency_links.txt b/package/GVA.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/package/GVA.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/package/GVA.egg-info/requires.txt b/package/GVA.egg-info/requires.txt new file mode 100644 index 0000000..c3a6004 --- /dev/null +++ b/package/GVA.egg-info/requires.txt @@ -0,0 +1,20 @@ +aiohttp==3.8.4 +aiosignal==1.3.1 +async-timeout==4.0.2 +attrs==22.2.0 +certifi==2022.12.7 +charset-normalizer==3.0.1 +frozenlist==1.3.3 +idna==3.4 +multidict==6.0.4 +openai==0.27.0 +python-nmap==0.7.1 +requests==2.28.2 +tqdm==4.65.0 +urllib3==1.26.14 +yarl==1.8.2 +dnspython +rich +cowsay +tk +customtkinter diff --git a/package/GVA.egg-info/top_level.txt b/package/GVA.egg-info/top_level.txt new file mode 100644 index 0000000..4fcc19b --- /dev/null +++ b/package/GVA.egg-info/top_level.txt @@ -0,0 +1 @@ +GVA diff --git a/package/GVA/ai_models.py b/package/GVA/ai_models.py index 247b7ea..9148111 100644 --- a/package/GVA/ai_models.py +++ b/package/GVA/ai_models.py @@ -282,6 +282,138 @@ def GPT_AI(key: str, data: Any) -> str: quit() +class JWT_AI_MODEL(): + @staticmethod + def BardAI(key: str, jwt_data: Any) -> str: + prompt = f""" + Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. + Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities + 5) For the output "Algorithm Used" value use the Algorithm value from the JWT data. + 6) For the output "Header" value use the Header value from the JWT data. + 7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints. + 8) For "Signature" mention the signatures discovered. 
+ 9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints" + + The output format: + {{ + "Algorithm Used": "", + "Header": "", + "Payload": "", + "Signature": "", + "PossibleAttacks": "", + "VulnerableEndpoints": "" + }} + + JWT Token Data to be analyzed: {jwt_data} + """ + + url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key + + headers = { + "Content-Type": "application/json" + } + + data = { + "prompt": { + "text": prompt + } + } + + response = requests.post(url, json=data, headers=headers) + + if response.status_code == 200: + generated_text = response.json() + jwt_analysis_data = jwt_ai_data_regex(str(generated_text)) + print(jwt_analysis_data) + return jwt_analysis_data + else: + print("Error: Unable to generate text. Status Code:", response.status_code) + return "None" + + @staticmethod + def llama_AI(self, jwt_data: str, mode: str, lkey, lendpoint): + api_url = 'http://localhost:5000/api/chatbot' + + user_instruction = """ + Perform a comprehensive analysis on the provided JWT token. The JWT analysis output must be in a asked format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. + Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks + + The output format: + "Header": + - List the JWT header details and security views on them + "Payload": + - List the JWT payload details and security views on them + "Signature": + - Provide insights on the JWT signature + "PossibleAttacks": + - List possible JWT exploits and attacks + """ + user_message = f""" + JWT Token Data to be analyzed: {jwt_data} + """ + + model_name = "TheBloke/Llama-2-7B-Chat-GGML" + file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin" + if mode == "local": + bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + elif mode == "runpod": + prompt = f"[INST] <> {user_instruction}<> JWT Token Data to be analyzed: {user_message} [/INST]" + bot_response = self.llama_runpod_api(prompt, lkey, lendpoint) + bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + print("test") + if bot_response: + return bot_response + + @staticmethod + def gpt_ai(analyze: str, api_key: Optional[str]) -> str: + openai.api_key = api_key + prompt = f""" + Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. + Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities + 5) For the output "Algorithm Used" value use the Algorithm value from the JWT data. + 6) For the output "Header" value use the Header value from the JWT data. + 7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints. + 8) For "Signature" mention the signatures discovered. 
+ 9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints" + + The output format: + {{ + "Algorithm Used": "", + "Header": "", + "Payload": "", + "Signature": "", + "PossibleAttacks": "", + "VulnerableEndpoints": "" + }} + + JWT Token Data to be analyzed: {analyze} + """ + try: + messages = [{"content": prompt, "role": "user"}] + response = openai.ChatCompletion.create( + model=model_engine, + messages=messages, + max_tokens=1024, + n=1, + stop=None, + ) + response = response['choices'][0]['message']['content'] + rsp = str(response) + return rsp + except KeyboardInterrupt: + print("Bye") + quit() + + def chat_with_api(api_url: str, user_message: str, user_instruction: str, model_name: str, file_name: str = None) -> Any: # Prepare the request data in JSON format data = { @@ -440,3 +572,61 @@ def nmap_ai_data_regex(json_string: str) -> Any: json_output = json.dumps(data) return json_output + + +def jwt_ai_data_regex(json_string: str) -> Any: + # Define the regular expression patterns for individual values + header_pattern = r'"Header": \{\s*"alg": "(.*?)",\s*"typ": "(.*?)"\s*\}' + payload_pattern = r'"Payload": \{\s*"iss": "(.*?)",\s*"sub": "(.*?)",\s*"aud": "(.*?)",\s*"exp": "(.*?)",\s*"nbf": "(.*?)",\s*"iat": "(.*?)"\s*\}' + signature_pattern = r'"Signature": "(.*?)"' + possible_attacks_pattern = r'"PossibleAttacks": "(.*?)"' + vulnerable_endpoints_pattern = r'"VulnerableEndpoints": "(.*?)"' + + # Initialize variables for extracted data + header = {} + payload = {} + signature = "" + possible_attacks = "" + vulnerable_endpoints = "" + + # Extract individual values using patterns + match_header = re.search(header_pattern, json_string) + if match_header: + header = {"alg": match_header.group(1), "typ": match_header.group(2)} + + match_payload = re.search(payload_pattern, json_string) + if match_payload: + payload = { + "iss": match_payload.group(1), + "sub": match_payload.group(2), + "aud": match_payload.group(3), + "exp": match_payload.group(4), + "nbf": match_payload.group(5), + "iat": match_payload.group(6) + } + + match_signature = re.search(signature_pattern, json_string) + if match_signature: + signature = match_signature.group(1) + + match_attacks = re.search(possible_attacks_pattern, json_string) + if match_attacks: + possible_attacks = match_attacks.group(1) + + match_endpoints = re.search(vulnerable_endpoints_pattern, json_string) + if match_endpoints: + vulnerable_endpoints = match_endpoints.group(1) + + # Create a dictionary to store the extracted data + data = { + "Header": header, + "Payload": payload, + "Signature": signature, + "PossibleAttacks": possible_attacks, + "VulnerableEndpoints": vulnerable_endpoints + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output diff --git a/package/GVA/jwt.py b/package/GVA/jwt.py new file mode 100644 index 0000000..dda89fb --- /dev/null +++ b/package/GVA/jwt.py @@ -0,0 +1,71 @@ +import jwt +import json +import base64 +from datetime import datetime +from typing import Optional + + +class JWTAnalyzer: + + def analyze(self, AIModels, token, openai_api_token: Optional[str], bard_api_token: Optional[str], llama_api_token: Optional[str], llama_endpoint: Optional[str], AI: str) -> str: + try: + self.algorithm_used = "" + self.decoded_payload = "" + self.expiration_time = "" + parts = token.split('.') + if len(parts) != 3: + raise ValueError("Invalid token format. 
Expected 3 parts.") + + header = json.loads(base64.urlsafe_b64decode(parts[0] + '===').decode('utf-8', 'replace')) + self.algorithm_used = header.get('alg', 'Unknown Algorithm') + payload = json.loads(base64.urlsafe_b64decode(parts[1] + '===').decode('utf-8', 'replace')) + self.decoded_payload = payload + self.claims = {key: value for key, value in payload.items()} + if 'exp' in payload: + self.expiration_time = datetime.utcfromtimestamp(payload['exp']) + self.analysis_result = { + 'Algorithm Used': self.algorithm_used, + 'Decoded Payload': self.decoded_payload, + 'Claims': self.claims, + 'Expiration Time': self.expiration_time + } + str_data = str(self.analysis_result) + match AI: + case 'openai': + try: + if openai_api_token is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.gpt_ai(str_data, openai_api_token) + except KeyboardInterrupt: + print("Bye") + quit() + case 'bard': + try: + if bard_api_token is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.BardAI(bard_api_token, str_data) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama': + try: + response = AIModels.llama_AI(str_data, "local", llama_api_token, llama_endpoint) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama-api': + try: + response = AIModels.Llama_AI(str_data, "runpod", llama_api_token, llama_endpoint) + except KeyboardInterrupt: + print("Bye") + quit() + final_data = str(response) + return final_data + except jwt.ExpiredSignatureError: + self.analysis_result = {'Error': 'Token has expired.'} + except jwt.InvalidTokenError as e: + self.analysis_result = {'Error': f'Invalid token: {e}'} diff --git a/package/GVA/menus.py b/package/GVA/menus.py index 74837c4..803bbfb 100644 --- a/package/GVA/menus.py +++ b/package/GVA/menus.py @@ -13,15 +13,19 @@ from GVA.dns_recon import DNSRecon from GVA.geo import geo_ip_recon from GVA.scanner import NetworkScanner +from GVA.subdomain import sub_enum +from GVA.jwt import JWTAnalyzer from GVA.ai_models import NMAP_AI_MODEL from GVA.ai_models import DNS_AI_MODEL -from GVA.subdomain import sub_enum +from GVA.ai_models import JWT_AI_MODEL dns_enum = DNSRecon() geo_ip = geo_ip_recon() p_ai_models = NMAP_AI_MODEL() dns_ai_models = DNS_AI_MODEL() +jwt_ai_model = JWT_AI_MODEL() port_scanner = NetworkScanner() +jwt_analyzer = JWTAnalyzer() sub_recon = sub_enum() console = Console() target = "" @@ -116,6 +120,52 @@ def print_output(self, attack_type: str, jdata: str, ai: str) -> Any: border_style="blue", ) print(message_panel) + case "JWT": + match ai: + case 'openai': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(str(key), str(value)) + print(table) + case 'bard': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(str(key), str(value)) + print(table) + case 'llama': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 
'llama-api': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) case "DNS": match ai: case 'openai': @@ -419,6 +469,97 @@ def geo_menu(self) -> None: except KeyboardInterrupt: print(Panel("Exiting Program")) + def jwt_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "AI Option") + table.add_row("2", "Set Token") + table.add_row("3", "Show options") + table.add_row("4", "Run Attack") + table.add_row("r", "Return") + console.print(table) + option = input("Enter your choice: ") + match option: + case "1": + clearscr() + table0 = Table() + table0.add_column("Options", style="cyan") + table0.add_column("AI Available", style="green") + table0.add_row("1", "OpenAI") + table0.add_row("2", "Bard") + table0.add_row("3", "LLama2") + print(Panel(table0)) + self.ai_set_choice = input("Enter AI of Choice: ") + match self.ai_set_choice: + case "1": + self.ai_set_args, self.ai_set = "openai", "openai" + self.akey_set = input("Enter OpenAI API: ") + print(Panel(f"API-Key Set: {self.akey_set}")) + case "2": + self.ai_set_args, self.ai_set = "bard", "bard" + self.bkey_set = input("Enter Bard AI API: ") + print(Panel(f"API-Key Set: {self.bkey_set}")) + case "3": + clearscr() + tablel = Table() + tablel.add_column("Options", style="cyan") + tablel.add_column("Llama Options", style="cyan") + tablel.add_row("1", "Llama Local") + tablel.add_row("2", "Llama RunPod") + print(tablel) + self.ai_set_choice = input("Enter AI of Choice: ") + self.ai_set_args = "llama" + self.ai_set = "llama" + if self.ai_set_choice == "1": + self.ai_set = "llama" + print(Panel("No Key needed")) + print(Panel("Selected LLama")) + elif self.ai_set_choice == "2": + self.ai_set = "llama-api" + self.llamaendpoint = input("Enter Runpod Endpoint ID: ") + self.llamakey = input("Enter Runpod API Key: ") + print(Panel(f"API-Key Set: {self.llamakey}")) + print(Panel(f"Runpod Endpoint Set: {self.llamaendpoint}")) + self.jwt_menu() + case "2": + clearscr() + print(Panel("Set Token value")) + self.t = input("Enter TOKEN: ") + print(Panel(f"Token Set:{self.t}")) + self.jwt_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", style="cyan") + table1.add_column("Value", style="green") + table1.add_row("AI Set", str(self.ai_set_args)) + table1.add_row("OpenAI API Key", str(self.akey_set)) + table1.add_row("Bard AI API Key", str(self.bkey_set)) + table1.add_row("Llama Runpod API Key", str(self.llamakey)) + table1.add_row("Runpod Endpoint ID", str(self.llamaendpoint)) + table1.add_row("JWT TOKEN", str(self.t)) + print(Panel(table1)) + self.jwt_menu() + case "4": + clearscr() + JWT_output: str = jwt_analyzer.analyze( + AIModels=jwt_ai_model, + token=self.t, + openai_api_token=self.akey_set, + bard_api_token=self.bkey_set, + llama_api_token=self.lkey, + llama_endpoint=self.lendpoint, + AI=self.ai_set + ) + self.print_output("JWT", JWT_output, self.ai_set) + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + def sub_menu(self) -> None: try: table = Table() @@ -499,6 +640,9 @@ def __init__(self, lamma_key, llama_api_endpoint, initial_keyset, target, profil case "4": clearscr() self.geo_menu() + case "5": + clearscr() + self.jwt_menu() case "q": quit() except KeyboardInterrupt: 
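Reviewer note: the new analyzer depends on re-padding base64url segments before decoding (the `parts[0] + '==='` calls above). Below is a standalone sketch of just that decoding step, using the header and payload from the sample JWT report; the function and variable names are illustrative only.

```python
import base64
import json


def b64url_json(segment: str) -> dict:
    # JWT segments are base64url without padding; appending '===' over-pads,
    # which urlsafe_b64decode tolerates in its default (non-strict) mode.
    return json.loads(base64.urlsafe_b64decode(segment + "===").decode("utf-8", "replace"))


token = (
    "eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
    ".eyJzdWIiOiAiMTIzNDU2Nzg5MCIsICJuYW1lIjogIkpvaG4gRG9lIiwgImlhdCI6IDE1MTYyMzkwMjJ9"
    "."
)
header_b64, payload_b64, signature_b64 = token.split(".")
print(b64url_json(header_b64))   # {'alg': 'HS256', 'typ': 'JWT'}
print(b64url_json(payload_b64))  # {'sub': '1234567890', 'name': 'John Doe', 'iat': 1516239022}
```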
diff --git a/package/build/lib/GVA/__init__.py b/package/build/lib/GVA/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/package/build/lib/GVA/ai_models.py b/package/build/lib/GVA/ai_models.py new file mode 100644 index 0000000..9148111 --- /dev/null +++ b/package/build/lib/GVA/ai_models.py @@ -0,0 +1,632 @@ +import json +import re +from typing import Any +from typing import Optional +import openai +import requests +model_engine = "gpt-3.5-turbo-0613" + + +class DNS_AI_MODEL(): + @staticmethod + def BardAI(key: str, data: Any) -> str: + prompt = f""" + Do a DNS analysis on the provided DNS scan information + The DNS output must return in a JSON format accorging to the provided + output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The DNS scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given + 3) The final output must be kept to a minimal + + The output format: + {{ + "A": [""], + "AAA": [""], + "NS": [""], + "MX": [""], + "PTR": [""], + "SOA": [""], + "TXT": [""] + }} + DNS Data to be analyzed: {data} + """ + + url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key + + headers = { + "Content-Type": "application/json" + } + + data = { + "prompt": { + "text": prompt + } + } + + response = requests.post(url, json=data, headers=headers) + + if response.status_code == 200: + generated_text = response.json() + data = dns_ai_data_regex(str(generated_text)) + print(data) + return dns_ai_data_regex(str(generated_text)) + else: + print("Error: Unable to generate text. Status Code:", response.status_code) + return "None" + + @staticmethod + def llama_AI(self, data: str, mode: str, lkey, lendpoint): + api_url = 'http://localhost:5000/api/chatbot' + + user_instruction = """ + Do a DNS scan analysis on the provided DNS scan information. The DNS output must return in a asked format accorging to the provided output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The DNS scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given + 3) The final output must be kept to a minimal + 4) So the analysis and provide your view according to the given format + 5) Remember to provide views as a security engineer or an security analyst. 
+ The output format: + "A": + - List the A records and security views on them + "AAA": + - List the AAA records and security views on them + "NS": + - List the NS records and security views on them + "MX": + - List the MX records and security views on them + "PTR": + - List the PTR records and security views on them + "SOA": + - List the SOA records and security views on them + "TXT": + - List the TXT records and security views on them + """ + user_message = f""" + DNS Data to be analyzed: {data} + """ + + model_name = "TheBloke/Llama-2-7B-Chat-GGML" + file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin" + if mode == "local": + bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + elif mode == "runpod": + prompt = f"[INST] <> {user_instruction}<> NMAP Data to be analyzed: {user_message} [/INST]" + bot_response = self.llama_runpod_api(prompt, lkey, lendpoint) + bot_response = self.chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + print("test") + if bot_response: + return bot_response + + @staticmethod + def gpt_ai(analyze: str, key: Optional[str]) -> str: + openai.api_key = key + prompt = f""" + Do a DNS analysis on the provided DNS scan information + The DNS output must return in a JSON format accorging to the provided + output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The DNS scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given + 3) The final output must be kept to a minimal + + The output format: + {{ + "A": [""], + "AAA": [""], + "NS": [""], + "MX": [""], + "PTR": [""], + "SOA": [""], + "TXT": [""] + }} + + DNS Data to be analyzed: {analyze} + """ + try: + # A structure for the request + messages = [{"content": prompt, "role": "user"}] + # A structure for the request + response = openai.ChatCompletion.create( + model=model_engine, + messages=messages, + max_tokens=1024, + n=1, + stop=None, + ) + response = response['choices'][0]['message']['content'] + return dns_ai_data_regex(str(response)) + except KeyboardInterrupt: + print("Bye") + quit() + + +class NMAP_AI_MODEL(): + @staticmethod + def BardAI(key: str, data: Any) -> str: + prompt = f""" + Do a NMAP scan analysis on the provided NMAP scan information + The NMAP output must return in a JSON format accorging to the provided + output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The NMAP scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given. + 3) The final output must be kept to a minimal. + 4) If a value not found in the scan just mention an empty string. + 5) Analyze everything even the smallest of data. + 6) Completely analyze the data provided and give a confirm answer using the output format. 
+ + The output format: + {{ + "critical score": [""], + "os information": [""], + "open ports": [""], + "open services": [""], + "vulnerable service": [""], + "found cve": [""] + }} + + NMAP Data to be analyzed: {data} + """ + + url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key + + headers = { + "Content-Type": "application/json" + } + + data = { + "prompt": { + "text": prompt + } + } + + response = requests.post(url, json=data, headers=headers) + + if response.status_code == 200: + generated_text = response.json() + return nmap_ai_data_regex(str(generated_text)) + else: + print("Error: Unable to generate text. Status Code:", response.status_code) + return "None" + + @staticmethod + def Llama_AI(data: str, mode: str, lkey: str, lendpoint: str) -> Any: + api_url = 'http://localhost:5000/api/chatbot' + + user_instruction = """ + Do a NMAP scan analysis on the provided NMAP scan information. The NMAP output must return in a asked format accorging to the provided output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The NMAP scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given. + 3) The final output must be kept to a minimal. + 4) If a value not found in the scan just mention an empty string. + 5) Analyze everything even the smallest of data. + 6) Completely analyze the data provided and give a confirm answer using the output format. + 7) mention all the data you found in the output format provided so that regex can be used on it. + 8) avoid unnecessary explaination. + 9) the critical score must be calculated based on the CVE if present or by the nature of the services open + 10) the os information must contain the OS used my the target. + 11) the open ports must include all the open ports listed in the data[tcp] and varifying if it by checking its states value. you should not negect even one open port. + 12) the vulnerable services can be determined via speculation of the service nature or by analyzing the CVE's found. + The output format: + critical score: + - Give info on the criticality + "os information": + - List out the OS information + "open ports and services": + - List open ports + - List open ports services + "vulnerable service": + - Based on CVEs or nature of the ports opened list the vulnerable services + "found cve": + - List the CVE's found and list the main issues. + """ + user_message = f""" + NMAP Data to be analyzed: {data} + """ + model_name = "TheBloke/Llama-2-7B-Chat-GGML" + file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin" + if mode == "local": + bot_response = chat_with_api(api_url, user_message, user_instruction, model_name, file_name) + elif mode == "runpod": + prompt = f"[INST] <> {user_instruction}<> NMAP Data to be analyzed: {user_message} [/INST]" + bot_response = llama_runpod_api(prompt, lkey, lendpoint) + if bot_response: + return bot_response + + @staticmethod + def GPT_AI(key: str, data: Any) -> str: + openai.api_key = key + try: + prompt = f""" + Do a NMAP scan analysis on the provided NMAP scan information + The NMAP output must return in a JSON format accorging to the provided + output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The NMAP scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given. 
+ 3) The final output must be kept to a minimal. + 4) If a value not found in the scan just mention an empty string. + 5) Analyze everything even the smallest of data. + 6) Completely analyze the data provided and give a confirm answer using the output format. + + The output format: + {{ + "critical score": [""], + "os information": [""], + "open ports": [""], + "open services": [""], + "vulnerable service": [""], + "found cve": [""] + }} + + NMAP Data to be analyzed: {data} + """ + # A structure for the request + messages = [{"content": prompt, "role": "assistant"}] + # A structure for the request + response = openai.ChatCompletion.create( + model=model_engine, + messages=messages, + max_tokens=2500, + n=1, + stop=None, + ) + response = response['choices'][0]['message']['content'] + rsp = str(response) + return str(nmap_ai_data_regex(rsp)) + except KeyboardInterrupt: + print("Bye") + quit() + + +class JWT_AI_MODEL(): + @staticmethod + def BardAI(key: str, jwt_data: Any) -> str: + prompt = f""" + Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. + Follow these guidelines: + 1) Analyze the JWT token from a pentester's perspective + 2) Keep the final output minimal while adhering to the given format + 3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities + 5) For the output "Algorithm Used" value use the Algorithm value from the JWT data. + 6) For the output "Header" value use the Header value from the JWT data. + 7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints. + 8) For "Signature" mention the signatures discovered. + 9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints" + + The output format: + {{ + "Algorithm Used": "", + "Header": "", + "Payload": "", + "Signature": "", + "PossibleAttacks": "", + "VulnerableEndpoints": "" + }} + + JWT Token Data to be analyzed: {jwt_data} + """ + + url = "https://generativelanguage.googleapis.com/v1beta2/models/text-bison-001:generateText?key=" + key + + headers = { + "Content-Type": "application/json" + } + + data = { + "prompt": { + "text": prompt + } + } + + response = requests.post(url, json=data, headers=headers) + + if response.status_code == 200: + generated_text = response.json() + jwt_analysis_data = jwt_ai_data_regex(str(generated_text)) + print(jwt_analysis_data) + return jwt_analysis_data + else: + print("Error: Unable to generate text. Status Code:", response.status_code) + return "None" + + @staticmethod + def llama_AI(self, jwt_data: str, mode: str, lkey, lendpoint): + api_url = 'http://localhost:5000/api/chatbot' + + user_instruction = """ + Perform a comprehensive analysis on the provided JWT token. The JWT analysis output must be in a asked format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report. 
+        Follow these guidelines:
+        1) Analyze the JWT token from a pentester's perspective
+        2) Keep the final output minimal while adhering to the given format
+        3) Highlight JWT-specific details and enumerate possible attacks
+
+        The output format:
+        "Header":
+        - List the JWT header details and security views on them
+        "Payload":
+        - List the JWT payload details and security views on them
+        "Signature":
+        - Provide insights on the JWT signature
+        "PossibleAttacks":
+        - List possible JWT exploits and attacks
+        """
+        user_message = f"""
+        JWT Token Data to be analyzed: {jwt_data}
+        """
+
+        model_name = "TheBloke/Llama-2-7B-Chat-GGML"
+        file_name = "llama-2-7b-chat.ggmlv3.q4_K_M.bin"
+        if mode == "local":
+            bot_response = chat_with_api(api_url, user_message, user_instruction, model_name, file_name)
+        elif mode == "runpod":
+            prompt = f"[INST] <> {user_instruction}<> JWT Token Data to be analyzed: {user_message} [/INST]"
+            bot_response = llama_runpod_api(prompt, lkey, lendpoint)
+        if bot_response:
+            return bot_response
+
+    @staticmethod
+    def gpt_ai(analyze: str, api_key: Optional[str]) -> str:
+        openai.api_key = api_key
+        prompt = f"""
+        Perform a comprehensive analysis on the provided JWT token. The analysis output must be in a JSON format according to the provided output structure. Ensure accuracy for inclusion in a penetration testing report.
+        Follow these guidelines:
+        1) Analyze the JWT token from a pentester's perspective
+        2) Keep the final output minimal while adhering to the given format
+        3) Highlight JWT-specific details and enumerate possible attacks and vulnerabilities
+        5) For the output "Algorithm Used" value use the Algorithm value from the JWT data.
+        6) For the output "Header" value use the Header value from the JWT data.
+        7) For the "Payload" Use the decoded payloads as a reference and then analyze any attack endpoints.
+        8) For "Signature" mention the signatures discovered.
+ 9) List a few endpoints you feel are vulnerable for "VulnerableEndpoints" + + The output format: + {{ + "Algorithm Used": "", + "Header": "", + "Payload": "", + "Signature": "", + "PossibleAttacks": "", + "VulnerableEndpoints": "" + }} + + JWT Token Data to be analyzed: {analyze} + """ + try: + messages = [{"content": prompt, "role": "user"}] + response = openai.ChatCompletion.create( + model=model_engine, + messages=messages, + max_tokens=1024, + n=1, + stop=None, + ) + response = response['choices'][0]['message']['content'] + rsp = str(response) + return rsp + except KeyboardInterrupt: + print("Bye") + quit() + + +def chat_with_api(api_url: str, user_message: str, user_instruction: str, model_name: str, file_name: str = None) -> Any: + # Prepare the request data in JSON format + data = { + 'user_message': user_message, + 'model_name': model_name, + 'file_name': file_name, + 'user_instruction': user_instruction + } + + # Send the POST request to the API + response = requests.post(api_url, json=data) + + # Check if the request was successful (status code 200) + if response.status_code == 200: + return response.json()['bot_response'] + else: + # If there was an error, print the error message + print(f"Error: {response.status_code} - {response.text}") + return None + + +def llama_runpod_api(prompt: str, lkey: str, lendpoint: str) -> Any: + url = f"https://api.runpod.ai/v2/{lendpoint}/runsync" + payload = json.dumps({ + "input": { + "prompt": prompt, + "max_new_tokens": 4500, + "temperature": 0.9, + "top_k": 50, + "top_p": 0.7, + "repetition_penalty": 1.2, + "batch_size": 8, + "stop": [ + "" + ] + } + }) + headers = { + 'Content-Type': 'application/json', + 'Authorization': f'Bearer {lkey}', + } + response = requests.request("POST", url, headers=headers, data=payload) + response_t = json.loads(response.text) + return response_t["output"] + + +def dns_ai_data_regex(json_string: str) -> Any: + # Define the regular expression patterns for individual values + A_pattern = r'"A": \["(.*?)"\]' + AAA_pattern = r'"AAA: \["(.*?)"\]' + NS_pattern = r'"NS": \["(.*?)"\]' + MX_pattern = r'"MX": \["(.*?)"\]' + PTR_pattern = r'"PTR": \["(.*?)"\]' + SOA_pattern = r'"SOA": \["(.*?)"\]' + TXT_pattern = r'"TXT": \["(.*?)"\]' + + # Initialize variables for extracted data + A = None + AAA = None + NS = None + MX = None + PTR = None + SOA = None + TXT = None + + # Extract individual values using patterns + match = re.search(A_pattern, json_string) + if match: + A = match.group(1) + match = re.search(AAA_pattern, json_string) + if match: + AAA = match.group(1) + match = re.search(NS_pattern, json_string) + if match: + NS = match.group(1) + match = re.search(MX_pattern, json_string) + if match: + MX = match.group(1) + match = re.search(PTR_pattern, json_string) + if match: + PTR = match.group(1) + match = re.search(SOA_pattern, json_string) + if match: + SOA = match.group(1) + match = re.search(TXT_pattern, json_string) + if match: + TXT = match.group(1) + + # Create a dictionary to store the extracted data + data = { + "A": A, + "AAA": AAA, + "NS": NS, + "MX": MX, + "PTR": PTR, + "SOA": SOA, + "TXT": TXT + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output + + +def nmap_ai_data_regex(json_string: str) -> Any: + # Define the regular expression patterns for individual values + critical_score_pattern = r'"critical score": \["(.*?)"\]' + os_information_pattern = r'"os information": \["(.*?)"\]' + open_ports_pattern = r'"open ports": \["(.*?)"\]' + 
open_services_pattern = r'"open services": \["(.*?)"\]' + vulnerable_service_pattern = r'"vulnerable service": \["(.*?)"\]' + found_cve_pattern = r'"found cve": \["(.*?)"\]' + # Initialize variables for extracted data + critical_score = None + os_information = None + open_ports = None + open_services = None + vulnerable_service = None + found_cve = None + + # Extract individual values using patterns + match = re.search(critical_score_pattern, json_string) + if match: + critical_score = match.group(1) + + match = re.search(os_information_pattern, json_string) + if match: + os_information = match.group(1) + match = re.search(open_ports_pattern, json_string) + if match: + open_ports = match.group(1) + match = re.search(open_services_pattern, json_string) + if match: + open_services = match.group(1) + + match = re.search(vulnerable_service_pattern, json_string) + if match: + vulnerable_service = match.group(1) + + match = re.search(found_cve_pattern, json_string) + if match: + found_cve = match.group(1) + + # Create a dictionary to store the extracted data + data = { + "critical score": critical_score, + "os information": os_information, + "open ports": open_ports, + "open services": open_services, + "vulnerable service": vulnerable_service, + "found cve": found_cve + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output + + +def jwt_ai_data_regex(json_string: str) -> Any: + # Define the regular expression patterns for individual values + header_pattern = r'"Header": \{\s*"alg": "(.*?)",\s*"typ": "(.*?)"\s*\}' + payload_pattern = r'"Payload": \{\s*"iss": "(.*?)",\s*"sub": "(.*?)",\s*"aud": "(.*?)",\s*"exp": "(.*?)",\s*"nbf": "(.*?)",\s*"iat": "(.*?)"\s*\}' + signature_pattern = r'"Signature": "(.*?)"' + possible_attacks_pattern = r'"PossibleAttacks": "(.*?)"' + vulnerable_endpoints_pattern = r'"VulnerableEndpoints": "(.*?)"' + + # Initialize variables for extracted data + header = {} + payload = {} + signature = "" + possible_attacks = "" + vulnerable_endpoints = "" + + # Extract individual values using patterns + match_header = re.search(header_pattern, json_string) + if match_header: + header = {"alg": match_header.group(1), "typ": match_header.group(2)} + + match_payload = re.search(payload_pattern, json_string) + if match_payload: + payload = { + "iss": match_payload.group(1), + "sub": match_payload.group(2), + "aud": match_payload.group(3), + "exp": match_payload.group(4), + "nbf": match_payload.group(5), + "iat": match_payload.group(6) + } + + match_signature = re.search(signature_pattern, json_string) + if match_signature: + signature = match_signature.group(1) + + match_attacks = re.search(possible_attacks_pattern, json_string) + if match_attacks: + possible_attacks = match_attacks.group(1) + + match_endpoints = re.search(vulnerable_endpoints_pattern, json_string) + if match_endpoints: + vulnerable_endpoints = match_endpoints.group(1) + + # Create a dictionary to store the extracted data + data = { + "Header": header, + "Payload": payload, + "Signature": signature, + "PossibleAttacks": possible_attacks, + "VulnerableEndpoints": vulnerable_endpoints + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output diff --git a/package/build/lib/GVA/assets.py b/package/build/lib/GVA/assets.py new file mode 100644 index 0000000..93224f1 --- /dev/null +++ b/package/build/lib/GVA/assets.py @@ -0,0 +1,184 @@ +import json +import os +import platform +import subprocess +from typing import Any +from rich 
import print +from rich.console import Console +from rich.table import Table +from rich.panel import Panel +from rich.console import Group +from rich.align import Align +from rich import box +from rich.markdown import Markdown + +console = Console() + + +class Assets(): + def clearscr() -> None: + try: + osp = platform.system() + match osp: + case 'Darwin': + os.system("clear") + case 'Linux': + os.system("clear") + case 'Windows': + os.system("cls") + except Exception: + pass + + def start_api_app(): + CREATE_NEW_CONSOLE = 0x00000010 + osp = platform.system() + match osp: + case 'Darwin': + subprocess.Popen(["python3", "llama_api.py"], creationflags=CREATE_NEW_CONSOLE) + case 'Linux': + subprocess.Popen(["python3", "llama_api.py"]) + case 'Windows': + subprocess.Popen(["python", "llama_api.py"], creationflags=CREATE_NEW_CONSOLE) + + def flatten_json(self, data: Any, separator: Any = '.') -> Any: + flattened_data = {} + for key, value in data.items(): + if isinstance(value, dict): + nested_data = self.flatten_json(value, separator) + for nested_key, nested_value in nested_data.items(): + flattened_data[key + separator + nested_key] = nested_value + else: + flattened_data[key] = value + return flattened_data + + def help_menu() -> None: + table = Table(title="Help Menu for GVA") + table.add_column("Options", style="cyan") + table.add_column("Input Type", style="green") + table.add_column("Argument Input", style="green") + table.add_column("Discription", style="green") + table.add_column("Other internal options", style="green") + table.add_row("Attack", "--attack", "TXT/STRING", + "The Attack the user whats to run", "sub / dns / nmap / geo") + table.add_row("Target", "--target", "IP/HOSTNAME", + "The target of the user", "None") + table.add_row("Domain List", "--list", "Path to text file", + "subdomain dictionary list", "Path") + table.add_row("Profile", "--profile", "INT (1-5)", + "The type of Nmap Scan the user intends", "None") + table.add_row("AI", "--ai", "STRING", + "Choose your AI of choice", "bard / openai (default)") + table.add_row("menu", "--menu", "BOOL", + "Interactive UI menu", "True / False (Default)") + table.add_row("Rich Help", "--r", "STRING", + "Pritty Help menu", "help") + console.print(table) + + def print_output(self, attack_type: str, jdata: str, ai: str) -> Any: + match attack_type: + case "Nmap": + match ai: + case 'openai': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'bard': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'llama': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 'llama-api': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 
"DNS": + match ai: + case 'openai': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'bard': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'llama': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 'llama-api': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case "GeoIP": + data = json.loads(jdata) + table = Table(title="GVA Report for GeoIP", show_header=True, header_style="bold magenta") + table.add_column("Identifiers", style="cyan") + table.add_column("Data", style="green") + + flattened_data: dict = self.flatten_json(data, separator='.') + + for key, value in flattened_data.items(): + value_str = str(value) + table.add_row(key, value_str) + + console = Console() + console.print(table) diff --git a/package/build/lib/GVA/dns_recon.py b/package/build/lib/GVA/dns_recon.py new file mode 100644 index 0000000..e8f309d --- /dev/null +++ b/package/build/lib/GVA/dns_recon.py @@ -0,0 +1,72 @@ +from typing import Any, Optional +import requests +import dns.resolver as dns_resolver_module +from rich.progress import track + + +class DNSRecon: + def dns_resolver(self, AIModels, target: str, akey: Optional[str], bkey: Optional[str], lkey, lendpoint, AI: str) -> Any: + if target is not None: + pass + else: + raise ValueError("InvalidTarget: Target Not Provided") + analyze = '' + # The DNS Records to be enumerated + record_types = ['A', 'AAAA', 'NS', 'CNAME', 'MX', 'PTR', 'SOA', 'TXT'] + for record_type in track(record_types): + try: + answer = dns_resolver_module.resolve(target, record_type) + for server in answer: + st = server.to_text() + analyze += f"\n{record_type} : {st}" + except dns_resolver_module.NoAnswer: + print('No record Found') + pass + except dns_resolver_module.NXDOMAIN: + print('NXDOMAIN record NOT Found') + pass + except dns_resolver_module.LifetimeTimeout: + print("Timed out, check your internet") + pass + except requests.exceptions.InvalidHeader: + pass + except KeyboardInterrupt: + print("Bye") + quit() + + response = "" + match AI: + case 'openai': + try: + if akey is not None: + # Clean up Bearer token from newline characters + akey = akey.replace('\n', '') + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.gpt_ai(akey, analyze) + except KeyboardInterrupt: + print("Bye") + quit() + case 'bard': + try: + if bkey is not None: + bkey = bkey.replace('\n', '') + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.BardAI(bkey, analyze) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama': + try: + response = AIModels.llama_AI(analyze, "local", lkey, lendpoint) + except KeyboardInterrupt: + print("Bye") 
+ quit() + case 'llama-api': + try: + response = AIModels.llama_AI(analyze, "runpod", lkey, lendpoint) + except KeyboardInterrupt: + print("Bye") + quit() + return str(response) diff --git a/package/build/lib/GVA/geo.py b/package/build/lib/GVA/geo.py new file mode 100644 index 0000000..cc3f820 --- /dev/null +++ b/package/build/lib/GVA/geo.py @@ -0,0 +1,17 @@ +from typing import Any +from typing import Optional + +import requests + + +class geo_ip_recon(): + def geoip(key: Optional[str], target: str) -> Any: + if key is None: + raise ValueError("KeyNotFound: Key Not Provided") + assert key is not None # This will help the type checker + if target is None: + raise ValueError("InvalidTarget: Target Not Provided") + url = f"https://api.ipgeolocation.io/ipgeo?apiKey={key}&ip={target}" + response = requests.get(url) + content = response.text + return content diff --git a/package/build/lib/GVA/gui.py b/package/build/lib/GVA/gui.py new file mode 100644 index 0000000..c3407fa --- /dev/null +++ b/package/build/lib/GVA/gui.py @@ -0,0 +1,425 @@ +import json +import re +from typing import Any +from typing import Optional + +import customtkinter +import dns.resolver +import nmap +import openai +import requests +from rich.progress import track + +customtkinter.set_appearance_mode("dark") +customtkinter.set_default_color_theme("dark-blue") + +root = customtkinter.CTk() +root.title("GVA - GUI") +root.geometry("600x400") + +nm = nmap.PortScanner() +model_engine = "text-davinci-003" + + +def application() -> None: + try: + apikey = entry1.get() + openai.api_key = apikey + target = entry2.get() + attack = entry5.get() + outputf = str(entry4.get()) + match attack: + case 'geo': + val = geoip(apikey, target) + print(val) + output_save(val, outputf) + case "nmap": + p = int(entry3.get()) + match p: + case 1: + val = scanner(target, 1, apikey) + print(val) + output_save(val, outputf) + case 2: + val = scanner(target, 2, apikey) + print(val) + output_save(val, outputf) + case 3: + val = scanner(target, 3, apikey) + print(val) + output_save(val, outputf) + case 4: + val = scanner(target, 4, apikey) + print(val) + output_save(val, outputf) + case 5: + val = scanner(target, 5, apikey) + print(val) + output_save(val, outputf) + case "dns": + val = dns_recon(target, apikey) + output_save(val, outputf) + case "subd": + val = sub(target) + output_save(val, outputf) + except KeyboardInterrupt: + print("Keyboard Interrupt detected ...") + + +def dns_extract_data(json_string: str) -> Any: + # Define the regular expression patterns for individual values + A_pattern = r'"A": \["(.*?)"\]' + AAA_pattern = r'"AAA: \["(.*?)"\]' + NS_pattern = r'"NS": \["(.*?)"\]' + MX_pattern = r'"MX": \["(.*?)"\]' + PTR_pattern = r'"PTR": \["(.*?)"\]' + SOA_pattern = r'"SOA": \["(.*?)"\]' + TXT_pattern = r'"TXT": \["(.*?)"\]' + + # Initialize variables for extracted data + A = None + AAA = None + NS = None + MX = None + PTR = None + SOA = None + TXT = None + + # Extract individual values using patterns + match = re.search(A_pattern, json_string) + if match: + A = match.group(1) + + match = re.search(AAA_pattern, json_string) + if match: + AAA = match.group(1) + + match = re.search(NS_pattern, json_string) + if match: + NS = match.group(1) + + match = re.search(MX_pattern, json_string) + if match: + MX = match.group(1) + + match = re.search(PTR_pattern, json_string) + if match: + PTR = match.group(1) + + match = re.search(SOA_pattern, json_string) + if match: + SOA = match.group(1) + + match = re.search(TXT_pattern, json_string) + if match: + TXT 
= match.group(1) + + # Create a dictionary to store the extracted data + data = { + "A": A, + "AAA": AAA, + "NS": NS, + "MX": MX, + "PTR": PTR, + "SOA": SOA, + "TXT": TXT + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output + + +def port_extract_data(json_string: str) -> Any: + # Define the regular expression patterns for individual values + critical_score_pattern = r'"critical score": \["(.*?)"\]' + os_information_pattern = r'"os information": \["(.*?)"\]' + open_ports_pattern = r'"open ports": \["(.*?)"\]' + open_services_pattern = r'"open services": \["(.*?)"\]' + vulnerable_service_pattern = r'"vulnerable service": \["(.*?)"\]' + found_cve_pattern = r'"found cve": \["(.*?)"\]' + + # Initialize variables for extracted data + critical_score = None + os_information = None + open_ports = None + open_services = None + vulnerable_service = None + found_cve = None + + # Extract individual values using patterns + match = re.search(critical_score_pattern, json_string) + if match: + critical_score = match.group(1) + + match = re.search(os_information_pattern, json_string) + if match: + os_information = match.group(1) + + match = re.search(open_ports_pattern, json_string) + if match: + open_ports = match.group(1) + + match = re.search(open_services_pattern, json_string) + if match: + open_services = match.group(1) + + match = re.search(vulnerable_service_pattern, json_string) + if match: + vulnerable_service = match.group(1) + + match = re.search(found_cve_pattern, json_string) + if match: + found_cve = match.group(1) + + # Create a dictionary to store the extracted data + data = { + "critical score": critical_score, + "os information": os_information, + "open ports": open_ports, + "open services": open_services, + "vulnerable service": vulnerable_service, + "found cve": found_cve + } + + # Convert the dictionary to JSON format + json_output = json.dumps(data) + + return json_output + + +def DnsAI(analyze: str, key: Optional[str]) -> str: + openai.api_key = key + prompt = f""" + Do a DNS analysis on the provided DNS scan information + The DNS output must return in a JSON format accorging to the provided + output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The DNS scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given + 3) The final output must be kept to a minimal + + The output format: + {{ + "A": [""], + "AAA": [""], + "NS": [""], + "MX": [""], + "PTR": [""], + "SOA": [""], + "TXT": [""] + }} + + DNS Data to be analyzed: {analyze} + """ + try: + # A structure for the request + completion = openai.Completion.create( + engine=model_engine, + prompt=prompt, + max_tokens=1024, + n=1, + stop=None, + ) + response = completion.choices[0].text + return dns_extract_data(str(response)) + except KeyboardInterrupt: + print("Bye") + quit() + + +def PortAI(key: str, data: Any) -> str: + openai.api_key = key + try: + prompt = f""" + Do a NMAP scan analysis on the provided NMAP scan information + The NMAP output must return in a JSON format accorging to the provided + output format. The data must be accurate in regards towards a pentest report. + The data must follow the following rules: + 1) The NMAP scans must be done from a pentester point of view + 2) The final output must be minimal according to the format given. + 3) The final output must be kept to a minimal. 
+ 4) If a value not found in the scan just mention an empty string. + 5) Analyze everything even the smallest of data. + + The output format: + {{ + "critical score": [""], + "os information": [""], + "open ports": [""], + "open services": [""], + "vulnerable service": [""], + "found cve": [""] + }} + + NMAP Data to be analyzed: {data} + """ + # A structure for the request + completion = openai.Completion.create( + engine=model_engine, + prompt=prompt, + max_tokens=1024, + n=1, + stop=None, + ) + response = completion.choices[0].text + return port_extract_data(str(response)) + except KeyboardInterrupt: + print("Bye") + quit() + + +def geoip(key: Optional[str], target: str) -> Any: + if key is None: + raise ValueError("KeyNotFound: Key Not Provided") + assert key is not None # This will help the type checker + if target is None: + raise ValueError("InvalidTarget: Target Not Provided") + url = f"https://api.ipgeolocation.io/ipgeo?apiKey={key}&ip={target}" + response = requests.get(url) + content = response.text + return content + + +def output_save(output: Any, outf: Any) -> Any: + top = customtkinter.CTkToplevel(root) + top.title("GVA Output") + top.grid_rowconfigure(0, weight=1) + top.grid_columnconfigure(0, weight=1) + top.textbox = customtkinter.CTkTextbox( + master=top, height=500, width=400, corner_radius=0) + top.textbox.grid(row=0, column=0, sticky="nsew") + + try: + file = open(outf, 'x') + except FileExistsError: + file = open(outf, "r+") + file.write(str(output)) + file.close + top.textbox.insert("0.0", text=output) + + +def sub(target: str) -> Any: + s_array = ['www', 'mail', 'ftp', 'localhost', 'webmail', 'smtp', 'hod', 'butterfly', 'ckp', + 'tele2', 'receiver', 'reality', 'panopto', 't7', 'thot', 'wien', 'uat-online', 'Footer'] + + ss = [] + out = "" + for subd in s_array: + try: + ip_value = dns.resolver.resolve(f'{subd}.{target}', 'A') + if ip_value: + ss.append(f'{subd}.{target}') + if f"{subd}.{target}" in ss: + print(f'{subd}.{target} | Found') + out += f'{subd}.{target}' + out += "\n" + out += "" + else: + pass + except dns.resolver.NXDOMAIN: + pass + except dns.resolver.NoAnswer: + pass + except KeyboardInterrupt: + print('Ended') + quit() + return out + + +def dns_recon(target: Optional[str], key: str) -> str: + if key is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + if target is not None: + pass + else: + raise ValueError("InvalidTarget: Target Not Provided") + analyze = '' + # The DNS Records to be enumeratee + record_types = ['A', 'AAAA', 'NS', 'CNAME', 'MX', 'PTR', 'SOA', 'TXT'] + for records in track(record_types): + try: + answer = dns.resolver.resolve(target, records) + for server in answer: + st = server.to_text() + analyze += "\n" + analyze += records + analyze += " : " + analyze += st + except dns.resolver.NoAnswer: + print('No record Found') + pass + except dns.resolver.NXDOMAIN: + print('NXDOMAIN record NOT Found') + pass + except KeyboardInterrupt: + print("Bye") + quit() + try: + response = DnsAI(key, analyze) + return str(response) + except KeyboardInterrupt: + print("Bye") + quit() + + +def scanner(ip: Optional[str], profile: int, key: str) -> str: + if key is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + # Handle the None case + profile_argument = "" + # The port profiles or scan types user can choose + if profile == 1: + profile_argument = '-Pn -sV -T4 -O -F' + elif profile == 2: + profile_argument = '-Pn -T4 -A -v' + elif profile == 3: + profile_argument = '-Pn -sS -sU -T4 -A -v' + elif 
profile == 4: + profile_argument = '-Pn -p- -T4 -A -v' + elif profile == 5: + profile_argument = '-Pn -sS -sU -T4 -A -PE -PP -PS80,443 -PA3389 -PU40125 -PY -g 53 --script=vuln' + else: + raise ValueError(f"Invalid Argument: {profile}") + # The scanner with GPT Implemented + nm.scan('{}'.format(ip), arguments='{}'.format(profile_argument)) + json_data = nm.analyse_nmap_xml_scan() + analyze = json_data["scan"] + try: + response = PortAI(key, analyze) + except KeyboardInterrupt: + print("Bye") + quit() + return str(response) + + +frame = customtkinter.CTkFrame(master=root) +frame.pack(pady=20, padx=60, fill="both", expand=True) + +label = customtkinter.CTkLabel( + master=frame, text="GVA System") +label.pack(pady=12, padx=10) + +entry1 = customtkinter.CTkEntry(master=frame, placeholder_text="API_KEY") +entry1.pack(pady=12, padx=10) +entry2 = customtkinter.CTkEntry(master=frame, placeholder_text="Target") +entry2.pack(pady=12, padx=10) +entry5 = customtkinter.CTkEntry( + master=frame, placeholder_text="Attack (nmap/dns)") +entry5.pack(pady=12, padx=10) +entry4 = customtkinter.CTkEntry(master=frame, placeholder_text="Savefile.json") +entry4.pack(pady=12, padx=10) +entry3 = customtkinter.CTkEntry( + master=frame, placeholder_text="Profile (Only Nmap)") +entry3.pack(pady=12, padx=10) +radiobutton_var = customtkinter.IntVar(value=1) +button = customtkinter.CTkButton( + master=frame, text="Run", command=application) +button.pack(pady=12, padx=10) + +root.mainloop() diff --git a/package/build/lib/GVA/jwt.py b/package/build/lib/GVA/jwt.py new file mode 100644 index 0000000..dda89fb --- /dev/null +++ b/package/build/lib/GVA/jwt.py @@ -0,0 +1,71 @@ +import jwt +import json +import base64 +from datetime import datetime +from typing import Optional + + +class JWTAnalyzer: + + def analyze(self, AIModels, token, openai_api_token: Optional[str], bard_api_token: Optional[str], llama_api_token: Optional[str], llama_endpoint: Optional[str], AI: str) -> str: + try: + self.algorithm_used = "" + self.decoded_payload = "" + self.expiration_time = "" + parts = token.split('.') + if len(parts) != 3: + raise ValueError("Invalid token format. 
Expected 3 parts.") + + header = json.loads(base64.urlsafe_b64decode(parts[0] + '===').decode('utf-8', 'replace')) + self.algorithm_used = header.get('alg', 'Unknown Algorithm') + payload = json.loads(base64.urlsafe_b64decode(parts[1] + '===').decode('utf-8', 'replace')) + self.decoded_payload = payload + self.claims = {key: value for key, value in payload.items()} + if 'exp' in payload: + self.expiration_time = datetime.utcfromtimestamp(payload['exp']) + self.analysis_result = { + 'Algorithm Used': self.algorithm_used, + 'Decoded Payload': self.decoded_payload, + 'Claims': self.claims, + 'Expiration Time': self.expiration_time + } + str_data = str(self.analysis_result) + match AI: + case 'openai': + try: + if openai_api_token is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.gpt_ai(str_data, openai_api_token) + except KeyboardInterrupt: + print("Bye") + quit() + case 'bard': + try: + if bard_api_token is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.BardAI(bard_api_token, str_data) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama': + try: + response = AIModels.llama_AI(str_data, "local", llama_api_token, llama_endpoint) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama-api': + try: + response = AIModels.Llama_AI(str_data, "runpod", llama_api_token, llama_endpoint) + except KeyboardInterrupt: + print("Bye") + quit() + final_data = str(response) + return final_data + except jwt.ExpiredSignatureError: + self.analysis_result = {'Error': 'Token has expired.'} + except jwt.InvalidTokenError as e: + self.analysis_result = {'Error': f'Invalid token: {e}'} diff --git a/package/build/lib/GVA/menus.py b/package/build/lib/GVA/menus.py new file mode 100644 index 0000000..aafd825 --- /dev/null +++ b/package/build/lib/GVA/menus.py @@ -0,0 +1,598 @@ +import json +import os +import platform +from typing import Any +from rich import print +from rich.console import Console +from rich.table import Table +from rich.panel import Panel +from rich.console import Group +from rich.align import Align +from rich import box +from rich.markdown import Markdown +from GVA.dns_recon import DNSRecon +from GVA.geo import geo_ip_recon +from GVA.scanner import NetworkScanner +from GVA.ai_models import NMAP_AI_MODEL +from GVA.ai_models import DNS_AI_MODEL +from GVA.ai_models import JWT_AI_MODEL +from GVA.subdomain import sub_enum + +dns_enum = DNSRecon() +geo_ip = geo_ip_recon() +p_ai_models = NMAP_AI_MODEL() +dns_ai_models = DNS_AI_MODEL() +jwt_analyzer = JWTAnalyzer() +port_scanner = NetworkScanner() +sub_recon = sub_enum() +console = Console() +target = "" +profile = "" +attack = "" +choice = "" +list_loc = "" +ai = "" +menu = "" +ai_set_args = "" +keyset = "" +akey_set = "" +bkey_set = "" +t = "" +profile_num = "" +ai_set = "" +llamakey = "" +llamaendpoint = "" + + +def clearscr() -> None: + try: + osp = platform.system() + match osp: + case 'Darwin': + os.system("clear") + case 'Linux': + os.system("clear") + case 'Windows': + os.system("cls") + except Exception: + pass + + +class Menus(): + def flatten_json(self, data: Any, separator: Any = '.') -> Any: + flattened_data = {} + for key, value in data.items(): + if isinstance(value, dict): + nested_data = self.flatten_json(value, separator) + for nested_key, nested_value in nested_data.items(): + flattened_data[key + separator + nested_key] = nested_value + else: + flattened_data[key] = value + return flattened_data + + def 
print_output(self, attack_type: str, jdata: str, ai: str) -> Any: + match attack_type: + case "Nmap": + match ai: + case 'openai': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'bard': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'llama': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 'llama-api': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case "DNS": + match ai: + case 'openai': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'bard': + data = json.loads(jdata) + table = Table(title=f"GVA Report for {attack_type}", show_header=True, header_style="bold magenta") + table.add_column("Variables", style="cyan") + table.add_column("Results", style="green") + + for key, value in data.items(): + table.add_row(key, value) + print(table) + case 'llama': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case 'llama-api': + ai_out = Markdown(jdata) + message_panel = Panel( + Align.center( + Group("\n", Align.center(ai_out)), + vertical="middle", + ), + box=box.ROUNDED, + padding=(1, 2), + title="[b red]The GVA LLama2", + border_style="blue", + ) + print(message_panel) + case "GeoIP": + data = json.loads(jdata) + table = Table(title="GVA Report for GeoIP", show_header=True, header_style="bold magenta") + table.add_column("Identifiers", style="cyan") + table.add_column("Data", style="green") + + flattened_data: dict = self.flatten_json(data, separator='.') + + for key, value in flattened_data.items(): + value_str = str(value) + table.add_row(key, value_str) + + console = Console() + console.print(table) + + def nmap_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "AI Options") + table.add_row("2", "Set Target") + table.add_row("3", "Set Profile") + table.add_row("4", "Show options") + table.add_row("5", "Run Attack") + table.add_row("r", "Return") + console.print(table) + self.option = input("Enter your choice: ") + match self.option: + case "1": + clearscr() + table0 = Table() + table0.add_column("Options", style="cyan") + table0.add_column("AI Available", style="green") + table0.add_row("1", "OpenAI") + 
table0.add_row("2", "Bard") + table0.add_row("3", "LLama2") + print(Panel(table0)) + self.ai_set_choice = input("Enter AI of Choice: ") + match self.ai_set_choice: + case "1": + self.ai_set_args, self.ai_set = "openai", "openai" + self.akey_set = input("Enter OpenAI API: ") + print(Panel(f"API-Key Set: {self.akey_set}")) + case "2": + self.ai_set_args, self.ai_set = "bard", "bard" + self.bkey_set = input("Enter Bard AI API: ") + print(Panel(f"API-Key Set: {self.bkey_set}")) + case "3": + clearscr() + tablel = Table() + tablel.add_column("Options", style="cyan") + tablel.add_column("Llama Options", style="cyan") + tablel.add_row("1", "Llama Local") + tablel.add_row("2", "Llama RunPod") + print(tablel) + self.ai_set_choice = input("Enter AI of Choice: ") + self.ai_set_args = "llama" + self.ai_set = "llama" + if self.ai_set_choice == "1": + self.ai_set = "llama" + print(Panel("No Key needed")) + print(Panel("Selected LLama")) + elif self.ai_set_choice == "2": + self.ai_set = "llama-api" + self.llamaendpoint = input("Enter Runpod Endpoint ID: ") + self.llamakey = input("Enter Runpod API Key: ") + print(Panel(f"API-Key Set: {self.llamakey}")) + print(Panel(f"Runpod Endpoint Set: {self.llamaendpoint}")) + self.nmap_menu() + case "2": + clearscr() + print(Panel("Set Target Hostname or IP")) + self.t = input("Enter Target: ") + print(Panel(f"Target Set: {self.t}")) + self.nmap_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", style="cyan") + table1.add_column("Value", style="green") + table1.add_row("1", "-Pn -sV -T4 -O -F") + table1.add_row("2", "-Pn -T4 -A -v") + table1.add_row("3", "-Pn -sS -sU -T4 -A -v") + table1.add_row("4", "-Pn -p- -T4 -A -v") + table1.add_row("5", "-Pn -sS -sU -T4 -A -PE -PP -PY -g 53 --script=vuln") + print(Panel(table1)) + self.profile_num = input("Enter your Profile: ") + print(Panel(f"Profile Set {self.profile_num}")) + self.nmap_menu() + case "4": + clearscr() + table2 = Table() + table2.add_column("Options", style="cyan") + table2.add_column("Value", style="green") + table2.add_row("AI Set", str(self.ai_set_args)) + table2.add_row("OpenAI API Key", str(self.akey_set)) + table2.add_row("Bard AI API Key", str(self.bkey_set)) + table2.add_row("Llama Runpod API Key", str(self.llamakey)) + table2.add_row("Runpod Endpoint ID", str(self.llamaendpoint)) + table2.add_row("Target", str(self.t)) + table2.add_row("Profile", str(self.profile_num)) + print(Panel(table2)) + self.nmap_menu() + case "5": + clearscr() + pout: str = port_scanner.scanner( + AIModels=p_ai_models, + ip=self.t, + profile=int(self.profile_num), + akey=self.akey_set, + bkey=self.bkey_set, + lkey=self.lkey, + lendpoint=self.lendpoint, + AI=self.ai_set + ) + self.print_output("Nmap", pout, self.ai_set) + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + + def dns_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "AI Option") + table.add_row("2", "Set Target") + table.add_row("3", "Show options") + table.add_row("4", "Run Attack") + table.add_row("r", "Return") + console.print(table) + option = input("Enter your choice: ") + match option: + case "1": + clearscr() + table0 = Table() + table0.add_column("Options", style="cyan") + table0.add_column("AI Available", style="green") + table0.add_row("1", "OpenAI") + table0.add_row("2", "Bard") + table0.add_row("3", "LLama2") + print(Panel(table0)) + self.ai_set_choice = 
input("Enter AI of Choice: ") + match self.ai_set_choice: + case "1": + self.ai_set_args, self.ai_set = "openai", "openai" + self.akey_set = input("Enter OpenAI API: ") + print(Panel(f"API-Key Set: {self.akey_set}")) + case "2": + self.ai_set_args, self.ai_set = "bard", "bard" + self.bkey_set = input("Enter Bard AI API: ") + print(Panel(f"API-Key Set: {self.bkey_set}")) + case "3": + clearscr() + tablel = Table() + tablel.add_column("Options", style="cyan") + tablel.add_column("Llama Options", style="cyan") + tablel.add_row("1", "Llama Local") + tablel.add_row("2", "Llama RunPod") + print(tablel) + self.ai_set_choice = input("Enter AI of Choice: ") + self.ai_set_args = "llama" + self.ai_set = "llama" + if self.ai_set_choice == "1": + self.ai_set = "llama" + print(Panel("No Key needed")) + print(Panel("Selected LLama")) + elif self.ai_set_choice == "2": + self.ai_set = "llama-api" + self.llamaendpoint = input("Enter Runpod Endpoint ID: ") + self.llamakey = input("Enter Runpod API Key: ") + print(Panel(f"API-Key Set: {self.llamakey}")) + print(Panel(f"Runpod Endpoint Set: {self.llamaendpoint}")) + self.dns_menu() + case "2": + clearscr() + print(Panel("Set Target Hostname or IP")) + self.t = input("Enter Target: ") + print(Panel(f"Target Set:{self.t}")) + self.dns_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", style="cyan") + table1.add_column("Value", style="green") + table1.add_row("AI Set", str(self.ai_set_args)) + table1.add_row("OpenAI API Key", str(self.akey_set)) + table1.add_row("Bard AI API Key", str(self.bkey_set)) + table1.add_row("Llama Runpod API Key", str(self.llamakey)) + table1.add_row("Runpod Endpoint ID", str(self.llamaendpoint)) + table1.add_row("Target", str(self.t)) + print(Panel(table1)) + self.dns_menu() + case "4": + clearscr() + dns_output: str = dns_enum.dns_resolver( + AIModels=dns_ai_models, + target=self.t, + akey=self.akey_set, + bkey=self.bkey_set, + lkey=self.lkey, + lendpoint=self.lendpoint, + AI=self.ai_set + ) + self.print_output("DNS", dns_output, self.ai_set) + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + + def geo_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "ADD API Key") + table.add_row("2", "Set Target") + table.add_row("3", "Show options") + table.add_row("4", "Run Attack") + table.add_row("r", "Return") + console.print(table) + self.option = input("Enter your choice: ") + match self.option: + case "1": + clearscr() + self.keyset = input("Enter GEO-IP API: ") + print(Panel(f"GEOIP API Key Set: {self.keyset}")) + self.geo_menu() + case "2": + clearscr() + print(Panel("Set Target Hostname or IP")) + self.t = input("Enter Target: ") + print(Panel(f"Target Set: {self.t}")) + self.geo_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", style="cyan") + table1.add_column("Value", style="green") + table1.add_row("API Key", str(self.keyset)) + table1.add_row("Target", str(self.t)) + print(Panel(table1)) + self.geo_menu() + case "4": + clearscr() + geo_output: str = geo_ip.geoip(self.keyset, self.t) + self.print_output("GeoIP", str(geo_output), ai="None") + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + + def jwt_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "AI 
Option") + table.add_row("2", "Set Token") + table.add_row("3", "Show options") + table.add_row("4", "Run Attack") + table.add_row("r", "Return") + console.print(table) + option = input("Enter your choice: ") + match option: + case "1": + clearscr() + table0 = Table() + table0.add_column("Options", style="cyan") + table0.add_column("AI Available", style="green") + table0.add_row("1", "OpenAI") + table0.add_row("2", "Bard") + table0.add_row("3", "LLama2") + print(Panel(table0)) + self.ai_set_choice = input("Enter AI of Choice: ") + match self.ai_set_choice: + case "1": + self.ai_set_args, self.ai_set = "openai", "openai" + self.akey_set = input("Enter OpenAI API: ") + print(Panel(f"API-Key Set: {self.akey_set}")) + case "2": + self.ai_set_args, self.ai_set = "bard", "bard" + self.bkey_set = input("Enter Bard AI API: ") + print(Panel(f"API-Key Set: {self.bkey_set}")) + case "3": + clearscr() + tablel = Table() + tablel.add_column("Options", style="cyan") + tablel.add_column("Llama Options", style="cyan") + tablel.add_row("1", "Llama Local") + tablel.add_row("2", "Llama RunPod") + print(tablel) + self.ai_set_choice = input("Enter AI of Choice: ") + self.ai_set_args = "llama" + self.ai_set = "llama" + if self.ai_set_choice == "1": + self.ai_set = "llama" + print(Panel("No Key needed")) + print(Panel("Selected LLama")) + elif self.ai_set_choice == "2": + self.ai_set = "llama-api" + self.llamaendpoint = input("Enter Runpod Endpoint ID: ") + self.llamakey = input("Enter Runpod API Key: ") + print(Panel(f"API-Key Set: {self.llamakey}")) + print(Panel(f"Runpod Endpoint Set: {self.llamaendpoint}")) + self.jwt_menu() + case "2": + clearscr() + print(Panel("Set Token value")) + self.t = input("Enter TOKEN: ") + print(Panel(f"Token Set:{self.t}")) + self.jwt_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", style="cyan") + table1.add_column("Value", style="green") + table1.add_row("AI Set", str(self.ai_set_args)) + table1.add_row("OpenAI API Key", str(self.akey_set)) + table1.add_row("Bard AI API Key", str(self.bkey_set)) + table1.add_row("Llama Runpod API Key", str(self.llamakey)) + table1.add_row("Runpod Endpoint ID", str(self.llamaendpoint)) + table1.add_row("JWT TOKEN", str(self.t)) + print(Panel(table1)) + self.jwt_menu() + case "4": + clearscr() + JWT_output: str = jwt_analyzer.analyze( + AIModels=jwt_ai_model, + token=self.t, + openai_api_token=self.akey_set, + bard_api_token=self.bkey_set, + llama_api_token=self.lkey, + llama_endpoint=self.lendpoint, + AI=self.ai_set + ) + self.print_output("JWT", JWT_output, self.ai_set) + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + + def sub_menu(self) -> None: + try: + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "ADD Subdomain list") + table.add_row("2", "Set Target") + table.add_row("3", "Show options") + table.add_row("4", "Run Attack") + table.add_row("r", "Return") + console.print(table) + self.option = input("Enter your choice: ") + match self.option: + case "1": + clearscr() + print(Panel("Set TXT subdomain file location")) + self.list_loc = input("Enter List Location: ") + print(Panel(f"Location Set: {self.list_loc}")) + self.sub_menu() + case "2": + clearscr() + print(Panel("Set Target Hostname or IP")) + self.t = input("Enter Target: ") + print(Panel(f"Target Set: {self.t}")) + self.sub_menu() + case "3": + clearscr() + table1 = Table() + table1.add_column("Options", 
style="cyan") + table1.add_column("Value", style="green") + table1.add_row("Location", str(self.list_loc)) + table1.add_row("Target", str(self.t)) + print(Panel(table1)) + self.sub_menu() + case "4": + clearscr() + sub_output: str = sub_recon.sub_enumerator(self.t, self.list_loc) + console.print(sub_output, style="bold underline") + case "r": + clearscr() + self.menu_term() + except KeyboardInterrupt: + print(Panel("Exiting Program")) + + def __init__(self, lamma_key, llama_api_endpoint, initial_keyset, target, profile_num, ai_set, openai_akey_set, bard_key_set, ai_set_args, llama_runpod_key, llama_endpoint) -> None: + try: + self.lkey = lamma_key + self.lendpoint = llama_api_endpoint + self.keyset = initial_keyset + self.t = target + self.profile_num = profile_num + self.ai_set = ai_set + self.akey_set = openai_akey_set + self.bkey_set = bard_key_set + self.ai_set_args = ai_set_args + self.llamakey = llama_runpod_key + self.llamaendpoint = llama_endpoint + table = Table() + table.add_column("Options", style="cyan") + table.add_column("Utility", style="green") + table.add_row("1", "Nmap Enum") + table.add_row("2", "DNS Enum") + table.add_row("3", "Subdomain Enum") + table.add_row("4", "GEO-IP Enum") + table.add_row("q", "Quit") + console.print(table) + option = input("Enter your choice: ") + match option: + case "1": + clearscr() + self.nmap_menu() + case "2": + clearscr() + self.dns_menu() + case "3": + clearscr() + self.sub_menu() + case "4": + clearscr() + self.geo_menu() + case "q": + quit() + except KeyboardInterrupt: + print(Panel("Exiting Program")) diff --git a/package/build/lib/GVA/requirements.txt b/package/build/lib/GVA/requirements.txt new file mode 100644 index 0000000..c3a6004 --- /dev/null +++ b/package/build/lib/GVA/requirements.txt @@ -0,0 +1,20 @@ +aiohttp==3.8.4 +aiosignal==1.3.1 +async-timeout==4.0.2 +attrs==22.2.0 +certifi==2022.12.7 +charset-normalizer==3.0.1 +frozenlist==1.3.3 +idna==3.4 +multidict==6.0.4 +openai==0.27.0 +python-nmap==0.7.1 +requests==2.28.2 +tqdm==4.65.0 +urllib3==1.26.14 +yarl==1.8.2 +dnspython +rich +cowsay +tk +customtkinter diff --git a/package/build/lib/GVA/scanner.py b/package/build/lib/GVA/scanner.py new file mode 100644 index 0000000..5ebc893 --- /dev/null +++ b/package/build/lib/GVA/scanner.py @@ -0,0 +1,63 @@ +from typing import Optional + +import nmap +nm = nmap.PortScanner() + + +class NetworkScanner(): + def scanner(self, AIModels, ip: Optional[str], profile: int, akey: Optional[str], bkey: Optional[str], lkey, lendpoint, AI: str) -> str: + profile_arguments = { + 1: '-Pn -sV -T4 -O -F', + 2: '-Pn -T4 -A -v', + 3: '-Pn -sS -sU -T4 -A -v', + 4: '-Pn -p- -T4 -A -v', + 5: '-Pn -sS -sU -T4 -A -PE -PP -PY -g 53 --script=vuln', + 6: '-Pn -sV -p- -A', + 7: '-Pn -sS -sV -O -T4 -A', + 8: '-Pn -sC', + 9: '-Pn -p 1-65535 -T4 -A -v', + 10: '-Pn -sU -T4', + 11: '-Pn -sV --top-ports 100', + 12: '-Pn -sS -sV -T4 --script=default,discovery,vuln', + 13: '-Pn -F' + } + # The scanner with GPT Implemented + nm.scan('{}'.format(ip), arguments='{}'.format(profile_arguments.get(profile))) + json_data = nm.analyse_nmap_xml_scan() + analyze = json_data["scan"] + match AI: + case 'openai': + try: + if akey is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.GPT_AI(akey, analyze) + except KeyboardInterrupt: + print("Bye") + quit() + case 'bard': + try: + if bkey is not None: + pass + else: + raise ValueError("KeyNotFound: Key Not Provided") + response = AIModels.BardAI(bkey, analyze) + except KeyboardInterrupt: + 
print("Bye") + quit() + case 'llama': + try: + response = AIModels.Llama_AI(analyze, "local", lkey, lendpoint) + except KeyboardInterrupt: + print("Bye") + quit() + case 'llama-api': + try: + response = AIModels.Llama_AI(analyze, "runpod", lkey, lendpoint) + except KeyboardInterrupt: + print("Bye") + quit() + self.response = response + text = str(self.response) + return text diff --git a/package/build/lib/GVA/subdomain.py b/package/build/lib/GVA/subdomain.py new file mode 100644 index 0000000..79ca833 --- /dev/null +++ b/package/build/lib/GVA/subdomain.py @@ -0,0 +1,44 @@ +import dns.resolver +from rich.console import Console +from rich.progress import track +from rich.table import Table + +console = Console() + + +class sub_enum(): + def display_urls(sd_data: list[str], count: int) -> None: + console = Console() + table = Table(title=f"GVA Subdomain report. found out of {count}", show_header=True, header_style="bold") + table.add_column("Index", justify="right", style="cyan") + table.add_column("URL", style="green") + for index, url in enumerate(sd_data): + table.add_row(str(index), url) + console.print(table) + + def sub_enumerator(self, target: str, list: str) -> str: + sd_data = [] + s_array = [] + count: int = 0 + with open(list, "r") as file: + for line in file: + subdomain_key = line.strip() + s_array.append(subdomain_key) + for subd in track(s_array): + try: + ip_value = dns.resolver.resolve(f'{subd}.{target}', 'A') + if ip_value: + sd_data.append(f'{subd}.{target}') + if f"{subd}.{target}" in sd_data: + count = count + 1 + else: + pass + except dns.resolver.NXDOMAIN: + pass + except dns.resolver.NoAnswer: + pass + except KeyboardInterrupt: + print('Ended') + quit() + self.display_urls(sd_data, count) + return 'Done'