diff --git a/README.md b/README.md index 12d2e79..38e20a6 100644 --- a/README.md +++ b/README.md @@ -9,26 +9,43 @@
- + +
- + +
+> Attention! DPULSE is a research tool. It is not intended for criminal activities! Use DPULSE only on allowed domains and for legal purposes! -> DPULSE was created as a research tool, and it is not intended for criminal activities. Use DPULSE only on allowed domains and for legal purposes! +# Repository map -> You can visit [DPULSE wiki](https://github.com/OSINT-TECHNOLOGIES/dpulse/wiki/DPULSE-WIKI) in order to get more technical information about this project +## What to visit? -> You can visit [DPULSE roadmap](https://github.com/users/OSINT-TECHNOLOGIES/projects/1) to get more information about development process +| What do you want to see? | Link | +| --- | --- | +| What is DPULSE? | [See "About DPULSE" page](https://github.com/OSINT-TECHNOLOGIES/dpulse?tab=readme-ov-file#about-dpulse) | +| Where can I find demos and use-cases? | [See "Demo and use-cases" page](https://github.com/OSINT-TECHNOLOGIES/dpulse?tab=readme-ov-file#dpulse-demo-and-use-cases) | +| I want to read the project documentation | [See DPULSE wiki](https://github.com/OSINT-TECHNOLOGIES/dpulse/wiki/DPULSE-WIKI) | +| I want to see the project roadmap and future development plans | [See DPULSE roadmap](https://github.com/users/OSINT-TECHNOLOGIES/projects/1) | -> You can also contact the developer via e-mail: osint.technologies@gmail.com +## What to download? -***[Download DPULSE stable ZIP archive (with latest stable changes)](https://github.com/OSINT-TECHNOLOGIES/dpulse/archive/refs/heads/main.zip)*** +| Your expectations | Version and link for you | +| --- | --- | +| I want to use only the tested and stable version of DPULSE | [DPULSE stable ZIP archive](https://github.com/OSINT-TECHNOLOGIES/dpulse/archive/refs/heads/main.zip) | +| I don't mind using DPULSE with the latest changes, and I'm OK with possible bugs and issues | [DPULSE rolling ZIP archive](https://github.com/OSINT-TECHNOLOGIES/dpulse/archive/refs/heads/rolling.zip) | +| I want to use one specific version of DPULSE | [See DPULSE releases page](https://github.com/OSINT-TECHNOLOGIES/dpulse/releases) | +| I want to see more detailed installation instructions | [See DPULSE installation guides](https://github.com/OSINT-TECHNOLOGIES/dpulse?tab=readme-ov-file#how-to-install-and-run-dpulse) -***[Download DPULSE rolling ZIP archive (with latest developer commit)](https://github.com/OSINT-TECHNOLOGIES/dpulse/archive/refs/heads/rolling.zip)*** +## How can I contact the developer? +| Reasons to contact | Links & addresses | +| --- | --- | +| I want to talk with the developer in person | DM to osint.technologies@gmail.com | +| I want to report a bug or issue, or suggest an idea to the developer | [Open a new issue](https://github.com/OSINT-TECHNOLOGIES/dpulse/issues/new/choose) | # About DPULSE @@ -43,8 +60,8 @@ DPULSE is a software solution for conducting OSINT research in relation to a cer - SSL certificate info - possible vulnerabilities - open ports - - CPEs, used web-technologies and so on. - - It also can download sitemap.xml and robots.txt files from a domain, and, moreover, it can do automated Google Dorking + - CPEs, used web-technologies and so on + - It also can download sitemap.xml and robots.txt files from a domain 2. ***PageSearch standard scan:*** extended subdomains deep search function, which starts in addition to basic scan and which can find: - more e-mail addresses @@ -57,7 +74,10 @@ DPULSE is a software solution for conducting OSINT research in relation to a cer 3. 
***PageSearch Sitemap inspection scan:*** sitemap links crawler which starts in addition to basic scan and which can find even more e-mails - +4. ***Dorking scan:*** extended domain research function with prepared Google Dorking databases for different purposes, such as IoT dorking, files dorking, admin panels dorking and so on. Moreover, this mode allows you to create your own custom Google Dorking database + +5. ***API scan:*** extended domain research function which uses prepared 3rd party API integrations. Currently DPULSE supports the VirusTotal API (for brief domain information gathering) and the SecurityTrails API (for deep subdomain and DNS enumeration) + Finally, DPULSE compiles all found data into an easy-to-read PDF, HTML or XLSX report by category. It also saves all information about scan in local report storage database, which can be restored later. # How to install and run DPULSE @@ -126,7 +146,7 @@ Then you choose menu item which you want to start. If you have problems with starting installer.sh, you should try to use `dos2unix installer.sh` or `sed -i 's/\r//' installer.sh` commands. -# DPULSE demos +# DPULSE demo and use-cases ### You can start DPULSE and see the main menu on the screen using one of the recommended commands in DPULSE root folder. Don't forget to install all requirements before starting DPULSE diff --git a/apis/api_keys.db b/apis/api_keys.db new file mode 100644 index 0000000..8534e92 Binary files /dev/null and b/apis/api_keys.db differ diff --git a/apis/api_keys_reference.db b/apis/api_keys_reference.db new file mode 100644 index 0000000..0ff949e Binary files /dev/null and b/apis/api_keys_reference.db differ diff --git a/apis/api_securitytrails.py b/apis/api_securitytrails.py new file mode 100644 index 0000000..95bbd72 --- /dev/null +++ b/apis/api_securitytrails.py @@ -0,0 +1,59 @@ +import requests +import sqlite3 +from colorama import Fore, Style + +def api_securitytrails_check(domain): + conn = sqlite3.connect('apis//api_keys.db') + cursor = conn.cursor() + cursor.execute("SELECT api_name, api_key FROM api_keys") + rows = cursor.fetchall() + for row in rows: + api_name, api_key = row + if api_name == 'SecurityTrails': + api_key = str(row[1]) + print(Fore.GREEN + 'Got SecurityTrails API key. 
Starting SecurityTrails scan...\n') + + subdomains_url = f"https://api.securitytrails.com/v1/domain/{domain}/subdomains?apikey={api_key}" + response = requests.get(subdomains_url) + + url = f"https://api.securitytrails.com/v1/domain/{domain}?apikey={api_key}" + general_response = requests.get(url) + general_data = general_response.json() + + print(Fore.GREEN + "[DOMAIN GENERAL INFORMATION]\n") + print(Fore.GREEN + "Alexa Rank: " + Fore.LIGHTCYAN_EX + f"{general_data['alexa_rank']}") + print(Fore.GREEN + "Apex Domain: " + Fore.LIGHTCYAN_EX + f"{general_data['apex_domain']}") + print(Fore.GREEN + "Hostname: " + Fore.LIGHTCYAN_EX + f"{general_data['hostname']}" + Style.RESET_ALL) + + print(Fore.GREEN + "\n[DNS RECORDS]" + Style.RESET_ALL) + for record_type, record_data in general_data['current_dns'].items(): + print(Fore.GREEN + f"\n[+] {record_type.upper()} RECORDS:" + Style.RESET_ALL) + for value in record_data.get('values', []): + if record_type == 'a': + print(Fore.GREEN + "IP: " + Fore.LIGHTCYAN_EX + f"{value['ip']} " + Fore.GREEN + "| Organization: " + Fore.LIGHTCYAN_EX + f"{value['ip_organization']}") + elif record_type == 'mx': + print(Fore.GREEN + "Hostname: " + Fore.LIGHTCYAN_EX + f"{value['hostname']} " + Fore.GREEN + "| Priority: " + Fore.LIGHTCYAN_EX + f"{value['priority']} " + Fore.GREEN + "| Organization: " + Fore.LIGHTCYAN_EX + f"{value['hostname_organization']}") + elif record_type == 'ns': + print(Fore.GREEN + "Nameserver: " + Fore.LIGHTCYAN_EX + f"{value['nameserver']} " + Fore.GREEN + "| Organization: " + Fore.LIGHTCYAN_EX + f"{value['nameserver_organization']}") + elif record_type == 'soa': + print(Fore.GREEN + "Email: " + Fore.LIGHTCYAN_EX + f"{value['email']} " + Fore.GREEN + "| TTL: " + Fore.LIGHTCYAN_EX + f"{value['ttl']}") + elif record_type == 'txt': + print(Fore.GREEN + "Value: " + Fore.LIGHTCYAN_EX + f"{value['value']}") + + if response.status_code == 200: + data = response.json() + print(Fore.GREEN + "\n[SUBDOMAINS DEEP ENUMERATION]\n") + print(Fore.GREEN + f"Found " + Fore.LIGHTCYAN_EX + f"{data['subdomain_count']} " + Fore.GREEN + "subdomains") + print(Fore.GREEN + "Subdomains list: ") + for i, subdomain in enumerate(data['subdomains'], start=1): + subdomain_url = f"http://{subdomain}.{domain}" + try: + response = requests.get(subdomain_url, timeout=5) + if response.status_code == 200: + print(Fore.GREEN + f"{i}. " + Fore.LIGHTCYAN_EX + f"{subdomain_url} " + Fore.GREEN + "is alive") + else: + pass + except Exception: + pass + else: + pass diff --git a/apis/api_virustotal.py b/apis/api_virustotal.py new file mode 100644 index 0000000..3585db5 --- /dev/null +++ b/apis/api_virustotal.py @@ -0,0 +1,49 @@ +import requests +import sqlite3 +from colorama import Fore, Style + +def check_domain(domain, api_key): + url = "https://www.virustotal.com/vtapi/v2/domain/report" + params = { + 'domain': domain, + 'apikey': api_key + } + + response = requests.get(url, params=params) + + if response.status_code == 200: + return response.json() + else: + print(f"Error: {response.status_code}") + return None + + +def api_virustotal_check(domain): + conn = sqlite3.connect('apis//api_keys.db') + cursor = conn.cursor() + cursor.execute("SELECT api_name, api_key FROM api_keys") + rows = cursor.fetchall() + for row in rows: + api_name, api_key = row + if api_name == 'VirusTotal': + api_key = str(row[1]) + print(Fore.GREEN + 'Got VirusTotal API key. 
Starting VirusTotal scan...\n') + + result = check_domain(domain, api_key) + + if result: + print(Fore.GREEN + "[VIRUSTOTAL DOMAIN REPORT]") + print(Fore.GREEN + f"Domain: {result.get('domain')}") + print(Fore.GREEN + f"Categories: {result.get('categories')}") + print(Fore.GREEN + f"Detected URLs: {len(result.get('detected_urls', []))}") + print(Fore.GREEN + f"Detected Samples: {len(result.get('detected_samples', []))}") + print(Fore.GREEN + f"Undetected Samples: {len(result.get('undetected_samples', []))}\n") + print(Fore.LIGHTGREEN_EX + "-------------------------------------------------\n" + Style.RESET_ALL) + conn.close() + else: + print(Fore.RED + "Failed to get domain report\n") + print(Fore.LIGHTGREEN_EX + "-------------------------------------------------\n" + Style.RESET_ALL) + conn.close() + pass + + diff --git a/datagather_modules/crawl_processor.py b/datagather_modules/crawl_processor.py index b7c818b..dc96e42 100644 --- a/datagather_modules/crawl_processor.py +++ b/datagather_modules/crawl_processor.py @@ -115,25 +115,25 @@ def sm_gather(url): for link in links: parsed_url = urlparse(link) hostname = parsed_url.hostname - if hostname and hostname.endswith('facebook.com'): + if hostname and (hostname == 'facebook.com' or hostname.endswith('.facebook.com')): categorized_links['Facebook'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('twitter.com'): + elif hostname and (hostname == 'twitter.com' or hostname.endswith('.twitter.com')): categorized_links['Twitter'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('instagram.com'): + elif hostname and (hostname == 'instagram.com' or hostname.endswith('.instagram.com')): categorized_links['Instagram'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('t.me'): + elif hostname and (hostname == 't.me' or hostname.endswith('.t.me')): categorized_links['Telegram'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('tiktok.com'): + elif hostname and (hostname == 'tiktok.com' or hostname.endswith('.tiktok.com')): categorized_links['TikTok'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('linkedin.com'): + elif hostname and (hostname == 'linkedin.com' or hostname.endswith('.linkedin.com')): categorized_links['LinkedIn'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('vk.com'): + elif hostname and (hostname == 'vk.com' or hostname.endswith('.vk.com')): categorized_links['VKontakte'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('youtube.com'): + elif hostname and (hostname == 'youtube.com' or hostname.endswith('.youtube.com')): categorized_links['YouTube'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('wechat.com'): + elif hostname and (hostname == 'wechat.com' or hostname.endswith('.wechat.com')): categorized_links['WeChat'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('ok.ru'): + elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')): categorized_links['Odnoklassniki'].append(urllib.parse.unquote(link)) if not categorized_links['Odnoklassniki']: @@ -214,25 +214,25 @@ def domains_reverse_research(subdomains, report_file_type): for inner_list in subdomain_socials_grouped: for link in inner_list: hostname = urlparse(link).hostname - if hostname and hostname.endswith('facebook.com'): + if hostname and (hostname == 'facebook.com' or hostname.endswith('.facebook.com')): 
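# Match either the exact social network host or one of its subdomains ('.facebook.com', '.twitter.com', ...), so that unrelated hosts such as 'notfacebook.com' are no longer miscategorized as social media links.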
sd_socials['Facebook'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('twitter.com'): + elif hostname and (hostname == 'twitter.com' or hostname.endswith('.twitter.com')): sd_socials['Twitter'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('instagram.com'): + elif hostname and (hostname == 'instagram.com' or hostname.endswith('.instagram.com')): sd_socials['Instagram'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('t.me'): + elif hostname and (hostname == 't.me' or hostname.endswith('.t.me')): sd_socials['Telegram'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('tiktok.com'): + elif hostname and (hostname == 'tiktok.com' or hostname.endswith('.tiktok.com')): sd_socials['TikTok'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('linkedin.com'): + elif hostname and (hostname == 'linkedin.com' or hostname.endswith('.linkedin.com')): sd_socials['LinkedIn'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('vk.com'): + elif hostname and (hostname == 'vk.com' or hostname.endswith('.vk.com')): sd_socials['VKontakte'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('youtube.com'): + elif hostname and (hostname == 'youtube.com' or hostname.endswith('.youtube.com')): sd_socials['YouTube'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('wechat.com'): + elif hostname and (hostname == 'wechat.com' or hostname.endswith('.wechat.com')): sd_socials['WeChat'].append(urllib.parse.unquote(link)) - elif hostname and hostname.endswith('ok.ru'): + elif hostname and (hostname == 'ok.ru' or hostname.endswith('.ok.ru')): sd_socials['Odnoklassniki'].append(urllib.parse.unquote(link)) sd_socials = {k: list(set(v)) for k, v in sd_socials.items()} diff --git a/datagather_modules/data_assembler.py b/datagather_modules/data_assembler.py index 8071526..cff43d7 100644 --- a/datagather_modules/data_assembler.py +++ b/datagather_modules/data_assembler.py @@ -8,6 +8,8 @@ import networking_processor as np from pagesearch_main import normal_search, sitemap_inspection_search from logs_processing import logging +from api_virustotal import api_virustotal_check +from api_securitytrails import api_securitytrails_check try: import requests @@ -65,7 +67,7 @@ def report_preprocessing(self, short_domain, report_file_type): os.makedirs(report_folder, exist_ok=True) return casename, db_casename, db_creation_date, robots_filepath, sitemap_filepath, sitemap_links_filepath, report_file_type, report_folder, files_ctime, report_ctime - def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, keywords, keywords_flag, dorking_flag): + def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, keywords, keywords_flag, dorking_flag, used_api_flag): casename, db_casename, db_creation_date, robots_filepath, sitemap_filepath, sitemap_links_filepath, report_file_type, report_folder, ctime, report_ctime = self.report_preprocessing(short_domain, report_file_type) logging.info(f'### THIS LOG PART FOR {casename} CASE, TIME: {ctime} STARTS HERE') print(Fore.GREEN + "Started scanning domain" + Style.RESET_ALL) @@ -147,6 +149,16 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table)) print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: 
{dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL) + if used_api_flag != ['Empty']: + print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: API SCANNING]\n" + Style.RESET_ALL) + if 1 in used_api_flag: + api_virustotal_check(short_domain) + if 2 in used_api_flag: + api_securitytrails_check(short_domain) + print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: API SCANNING]\n" + Style.RESET_ALL) + else: + pass + data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials, subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records, robots_txt_result, sitemap_xml_result, sitemap_links_status, @@ -180,13 +192,23 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k if dorking_flag == 'none': dorking_status = 'Google Dorking mode was not selected for this scan' - dorking_file_path = 'Google Dorking mode was not selected for this scan' + dorking_results = ['Google Dorking mode was not selected for this scan'] else: dorking_db_path, table = establishing_dork_db_connection(dorking_flag.lower()) print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL) - dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table)) + dorking_status, dorking_results = dp.transfer_results_to_xlsx(table, dp.get_dorking_query(short_domain, dorking_db_path, table)) print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL) + if used_api_flag != ['Empty']: + print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: API SCANNING]\n" + Style.RESET_ALL) + if 1 in used_api_flag: + api_virustotal_check(short_domain) + if 2 in used_api_flag: + api_securitytrails_check(short_domain) + print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: API SCANNING]\n" + Style.RESET_ALL) + else: + pass + data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials, subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records, robots_txt_result, sitemap_xml_result, sitemap_links_status, @@ -230,6 +252,16 @@ def data_gathering(self, short_domain, url, report_file_type, pagesearch_flag, k dorking_status, dorking_file_path = dp.save_results_to_txt(report_folder, table, dp.get_dorking_query(short_domain, dorking_db_path, table)) print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: {dorking_flag.upper()} DORKING]\n" + Style.RESET_ALL) + if used_api_flag != ['Empty']: + print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN START: API SCANNING]\n" + Style.RESET_ALL) + if 1 in used_api_flag: + api_virustotal_check(short_domain) + if 2 in used_api_flag: + api_securitytrails_check(short_domain) + print(Fore.LIGHTMAGENTA_EX + f"\n[EXTENDED SCAN END: API SCANNING]\n" + Style.RESET_ALL) + else: + pass + data_array = [ip, res, mails, subdomains, subdomains_amount, social_medias, subdomain_mails, sd_socials, subdomain_ip, issuer, subject, notBefore, notAfter, commonName, serialNumber, mx_records, robots_txt_result, sitemap_xml_result, sitemap_links_status, diff --git a/dorking/dorking_handler.py b/dorking/dorking_handler.py index 19a1d3e..51d9489 100644 --- a/dorking/dorking_handler.py +++ b/dorking/dorking_handler.py @@ -18,20 +18,30 @@ def get_dorking_query(short_domain, dorking_db_path, table): print(Fore.GREEN + "Getting dorking query from database") - conn = sqlite3.connect(dorking_db_path) - cursor = conn.cursor() 
- cursor.execute(f"SELECT dork FROM {table}") - rows = cursor.fetchall() - search_query = [row[0].format(short_domain) for row in rows] - conn.close() - return search_query + try: + conn = sqlite3.connect(dorking_db_path) + cursor = conn.cursor() + cursor.execute(f"SELECT dork FROM {table}") + rows = cursor.fetchall() + search_query = [row[0].format(short_domain) for row in rows] + conn.close() + return search_query + except Exception as e: + print(Fore.RED + f"Error getting dorking query: {e}") + return [] def get_columns_amount(dorking_db_path, table): - conn = sqlite3.connect(dorking_db_path) - cursor = conn.cursor() - cursor.execute(f"SELECT COUNT(*) FROM {table}") - row_count = cursor.fetchone()[0] - conn.close() + conn = None + try: + conn = sqlite3.connect(dorking_db_path) + cursor = conn.cursor() + cursor.execute(f"SELECT COUNT(*) FROM {table}") + row_count = cursor.fetchone()[0] + except Exception as e: + print(f"Error getting column count: {e}") + return None + finally: + if conn is not None: + conn.close() return row_count def solid_google_dorking(query, dorking_delay, delay_step, pages=100): @@ -44,25 +54,28 @@ result_query = [] request_count = 0 for page in range(pages): - for link in browser.links(): - target = link.attrs['href'] - if (target.startswith('/url?') and not - target.startswith("/url?q=http://webcache.googleusercontent.com")): - target = re.sub(r"^/url\?q=([^&]*)&.*", r"\1", target) - result_query.append(target) - request_count += 1 - if request_count % delay_step == 0: - time.sleep(dorking_delay) try: + for link in browser.links(): + target = link.attrs['href'] + if (target.startswith('/url?') and not + target.startswith("/url?q=http://webcache.googleusercontent.com")): + target = re.sub(r"^/url\?q=([^&]*)&.*", r"\1", target) + result_query.append(target) + request_count += 1 + if request_count % delay_step == 0: + time.sleep(dorking_delay) browser.follow_link(nr=page + 1) except mechanicalsoup.LinkNotFoundError: break - + except Exception as e: + logging.error(f'DORKING PROCESSING: ERROR. REASON: {e}') del result_query[-2:] return result_query except requests.exceptions.ConnectionError as e: print(Fore.RED + "Error while establishing connection with domain. No results will appear. See journal for details" + Style.RESET_ALL) logging.error(f'DORKING PROCESSING: ERROR. REASON: {e}') + except Exception as e: + logging.error(f'DORKING PROCESSING: ERROR. REASON: {e}') def save_results_to_txt(folderpath, table, queries, pages=10): try: @@ -78,14 +91,18 @@ dorked_query_counter = 0 for i, query in enumerate(queries, start=1): f.write(f"QUERY #{i}: {query}\n") - results = solid_google_dorking(query, dorking_delay, delay_step, pages) - if not results: - f.write("=> NO RESULT FOUND\n") + try: + results = solid_google_dorking(query, dorking_delay, delay_step, pages) + if not results: + f.write("=> NO RESULT FOUND\n") + total_results.append((query, 0)) + else: + total_results.append((query, len(results))) + for result in results: + f.write(f"=> {result}\n") + except Exception as e: + logging.error(f"DORKING PROCESSING: ERROR. 
REASON: {e}") total_results.append((query, 0)) - else: - total_results.append((query, len(results))) - for result in results: - f.write(f"=> {result}\n") f.write("\n") dorked_query_counter += 1 print(Fore.GREEN + f" Dorking with " + Style.RESET_ALL + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{dorked_query_counter}/{total_dorks_amount}" + Style.RESET_ALL + Fore.GREEN + " dork" + Style.RESET_ALL, end="\r") @@ -112,7 +129,7 @@ def transfer_results_to_xlsx(table, queries, pages=10): dorking_return_list = [] for i, query in enumerate(queries, start=1): dorking_return_list.append(f"QUERY #{i}: {query}\n") - results = solid_google_dorking(query, pages) + results = solid_google_dorking(query, dorking_delay, delay_step) if not results: dorking_return_list.append("NO RESULT FOUND\n") else: diff --git a/dpulse.py b/dpulse.py index f4bc0d8..3868826 100644 --- a/dpulse.py +++ b/dpulse.py @@ -1,16 +1,15 @@ import sys +import os +from colorama import Fore, Style, Back + sys.path.append('datagather_modules') sys.path.append('service') sys.path.append('reporting_modules') sys.path.append('dorking') +sys.path.append('apis') -from colorama import Fore, Style, Back -import cli_init from config_processing import create_config, check_cfg_presence, read_config, print_and_return_config -import db_processing as db -import os -db.db_creation('report_storage.db') cfg_presence = check_cfg_presence() if cfg_presence is True: print(Fore.GREEN + "Global config file presence: OK" + Style.RESET_ALL) @@ -19,18 +18,19 @@ create_config() print(Fore.GREEN + "Successfully generated global config file") +import db_processing as db +import cli_init from dorking_handler import dorks_files_check, get_columns_amount -dorks_files_check() -import pdf_report_creation as pdf_rc -import xlsx_report_creation as xlsx_rc -import html_report_creation as html_rc from data_assembler import DataProcessing -from misc import time_processing, domain_precheck +from logs_processing import logging + +db.db_creation('report_storage.db') + +dorks_files_check() try: import socket import re - import time import webbrowser import sqlite3 import itertools @@ -65,6 +65,11 @@ def run(): domain_patter = r'^[a-zA-Z0-9-]+\.[a-zA-Z]{2,}$' choice = input(Fore.YELLOW + "Enter your choice >> ") if choice == "1": + from misc import time_processing, domain_precheck + import pdf_report_creation as pdf_rc + import xlsx_report_creation as xlsx_rc + import html_report_creation as html_rc + print(Fore.GREEN + "\nImported and activated reporting modules" + Style.RESET_ALL) while True: short_domain = input(Fore.YELLOW + "\nEnter target's domain name (or 'back' to return to the menu) >> ") if short_domain.lower() == "back": @@ -111,14 +116,26 @@ def run(): keywords_flag = 0 if report_filetype.lower() == 'pdf' or report_filetype.lower() == 'xlsx' or report_filetype.lower() == 'html': dorking_flag = input(Fore.YELLOW + "Select Dorking mode [Basic/IoT/Files/Admins/Web/Custom/None] >> ") - #api_flag = input(Fore.YELLOW + "Would you like to use 3rd party API in scan? [Y/N] >> ") - #if api_flag.lower() == 'y': - #print api db content - #write ID which you want to use using comma (ex: 1,3,4) - #elif api_flag.lower() == 'n': - #pass - #else: - #print invalid mode + api_flag = input(Fore.YELLOW + "Would you like to use 3rd party API in scan? 
[Y/N] >> ") + if api_flag.lower() == 'y': + print(Fore.GREEN + "\nSupported APIs and your keys:\n") + db.select_api_keys('printing') + print(Fore.GREEN + "Pay attention that APIs with a red-colored API Key field cannot be used!\n") + to_use_api_flag = input(Fore.YELLOW + "Select API IDs you want to use in scan (separated by comma) >> ") + used_api_flag = [int(num) for num in to_use_api_flag.split(',')] + if db.check_api_keys(used_api_flag): + print(Fore.GREEN + 'Found API key. Continuing') + else: + print(Fore.RED + "\nAPI key was not found. Check if you've entered a valid API key in the API Keys DB") + break + used_api_ui = f"Yes, using APIs with the following IDs: {', '.join(str(num) for num in used_api_flag)}" + elif api_flag.lower() == 'n': + used_api_ui = 'No' + used_api_flag = ['Empty'] + else: + print(Fore.RED + "\nInvalid API usage mode" + Style.RESET_ALL) + break if pagesearch_flag.lower() == 'y' or pagesearch_flag.lower() == 'n' or pagesearch_flag.lower() == 'si': if pagesearch_flag.lower() == "n": pagesearch_ui_mark = 'No' @@ -148,12 +165,7 @@ row_count = get_columns_amount(f'dorking//{custom_db_name}.db', 'dorks') dorking_ui_mark = f'Yes, Custom table dorking ({row_count} dorks)' dorking_flag = str(dorking_flag.lower() + f"+{custom_db_name}.db") - print(Fore.LIGHTMAGENTA_EX + "\n[PRE-SCAN SUMMARY]\n" + Style.RESET_ALL) - print(Fore.GREEN + "Determined target: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + short_domain + Style.RESET_ALL) - print(Fore.GREEN + "Report type: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + report_filetype.lower() + Style.RESET_ALL) - print(Fore.GREEN + "PageSearch conduction: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + pagesearch_ui_mark + Style.RESET_ALL) - print(Fore.GREEN + "Dorking conduction: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + dorking_ui_mark + Style.RESET_ALL) - print(Fore.GREEN + "Case comment: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + case_comment + Style.RESET_ALL + "\n") + cli_init.print_prescan_summary(short_domain, report_filetype.upper(), pagesearch_ui_mark, dorking_ui_mark, used_api_ui, case_comment) print(Fore.LIGHTMAGENTA_EX + "[BASIC SCAN START]\n" + Style.RESET_ALL) spinner_thread = ProgressBar() spinner_thread.start() try: if pagesearch_flag.lower() == 'y': start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start elif pagesearch_flag.lower() == 'si': start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start else: start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start 
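# data_gathering() now also receives used_api_flag: a list of integer API IDs selected above, or ['Empty'] when no 3rd party API was chosen.
# For illustration, passing [1, 2] enables both supported APIs, since data_assembler.py calls api_virustotal_check() when 1 is present and api_securitytrails_check() when 2 is present.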
endtime_string = time_processing(end) pdf_rc.report_assembling(short_domain, url, case_comment, data_array, report_info_array, pagesearch_ui_mark, pagesearch_flag.lower(), endtime_string) @@ -180,15 +192,15 @@ def run(): try: if pagesearch_flag.lower() == 'y': start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start elif pagesearch_flag.lower() == 'si': start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start else: start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start endtime_string = time_processing(end) xlsx_rc.create_report(short_domain, url, case_comment, data_array, report_info_array, pagesearch_ui_mark, pagesearch_flag.lower(), endtime_string) @@ -199,16 +211,15 @@ def run(): try: if pagesearch_flag.lower() == 'y': start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start elif pagesearch_flag.lower() == 'si': start = time() - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower()) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), keywords_list, keywords_flag, dorking_flag.lower(), used_api_flag) end = time() - start else: start = time() - print(dorking_flag) - data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, str(dorking_flag.lower())) + data_array, report_info_array = data_processing.data_gathering(short_domain, url, report_filetype.lower(), pagesearch_flag.lower(), '', keywords_flag, str(dorking_flag.lower()), used_api_flag) end = time() - start endtime_string = time_processing(end) html_rc.report_assembling(short_domain, url, case_comment, data_array, report_info_array, pagesearch_ui_mark, pagesearch_flag.lower(), endtime_string) @@ -223,7 +234,7 @@ def run(): choice_settings = input(Fore.YELLOW + "Enter your choice >> ") if choice_settings == '1': import configparser - config = print_and_return_config() + print_and_return_config() elif choice_settings == 
'2': import configparser config = print_and_return_config() @@ -236,23 +247,18 @@ with open('service//config.ini', 'w') as configfile: config.write(configfile) print(Fore.GREEN + "Configuration updated successfully" + Style.RESET_ALL) - elif choice_settings == '4': + elif choice_settings == '3': with open('journal.log', 'w'): print(Fore.GREEN + "Journal file was successfully cleared" + Style.RESET_ALL) pass - elif choice_settings == '5': + elif choice_settings == '4': continue elif choice == '3': cli.dorking_db_manager() choice_dorking = input(Fore.YELLOW + "Enter your choice >> ") if choice_dorking == '1': from db_creator import manage_dorks - print('\n') - print(Fore.GREEN + "You've entered custom Dorking DB generator!\n" + Style.RESET_ALL) - print(Fore.GREEN + "Remember some rules in order to successfully create your custom Dorking DB:" + Style.RESET_ALL) - print(Fore.GREEN + "[1] - dork_id variable must be unique, starting with 1 and then +1 every new dork" + Style.RESET_ALL) - print(Fore.GREEN + "[2] - When it comes to define domain in dork, put {} in it\n" + Style.RESET_ALL) - print(Fore.GREEN + "Examples: related:{}, site:{} inurl:login and so on\n" + Style.RESET_ALL) + cli_init.print_api_db_msg() ddb_name = input(Fore.YELLOW + "Enter a name for your custom Dorking DB (without any extensions) >> ") manage_dorks(ddb_name) elif choice_dorking == '2': @@ -275,6 +281,45 @@ else: print(Fore.RED + "\nInvalid menu item. Please select between existing menu items") + elif choice == '5': + cli.api_manager() + print('\n') + choice_api = input(Fore.YELLOW + "Enter your choice >> ") + if choice_api == '1': + print(Fore.GREEN + "\nSupported APIs and your keys:\n") + cursor, conn = db.select_api_keys('updating') + api_id_to_update = input(Fore.YELLOW + "Enter API's ID to update its key >> ") + new_api_key = input(Fore.YELLOW + "Enter new API key >> ") + + try: + cursor.execute(""" + UPDATE api_keys + SET api_key = ? + WHERE id = ? + """, (new_api_key, api_id_to_update)) + + conn.commit() + conn.close() + print(Fore.GREEN + "\nSuccessfully updated API key" + Style.RESET_ALL) + except Exception as e: + print(Fore.RED + "Something went wrong when updating API key. See journal for details" + Style.RESET_ALL) + logging.error(f'API KEY UPDATE: ERROR. REASON: {e}') + + elif choice_api == '2': + import shutil + try: + os.remove('apis//api_keys.db') + print(Fore.GREEN + "Deleted old API Keys DB") + except FileNotFoundError: + print(Fore.RED + "API Keys DB was not found") + try: + shutil.copyfile('apis//api_keys_reference.db', 'apis//api_keys.db') + print(Fore.GREEN + "Successfully restored reference API Keys DB") + except FileNotFoundError: + print(Fore.RED + "Reference API Keys DB was not found") + else: + continue + elif choice == "4": cli.print_db_menu() print('\n') diff --git a/pagesearch/pagesearch_parsers.py b/pagesearch/pagesearch_parsers.py index e646075..140da9c 100644 --- a/pagesearch/pagesearch_parsers.py +++ b/pagesearch/pagesearch_parsers.py @@ -7,6 +7,7 @@ import sys sys.path.append('service') from logs_processing import logging +from cli_init import print_ps_cli_report def extract_text_from_pdf(filename: str) -> str: try: @@ -84,7 +85,6 @@ except Exception as e: print(Fore.RED + "Can't access some subdomain. See journal for details") logging.error(f'ACCESSING SUBDOMAIN (PAGESEARCH): ERROR. 
REASON: {e}') - print(Fore.LIGHTGREEN_EX + "-------------------------------------------------" + Style.RESET_ALL) pass try: @@ -98,6 +98,7 @@ def subdomains_parser(subdomains_list, report_folder, keywords, keywords_flag): search_query_input = soup.find('input', {'name': 'q'}) customization_input = soup.find('input', {'name': 'language'}) passwords = soup.find_all('input', {'type': 'password'}) + print(Fore.LIGHTGREEN_EX + "-------------------------------------------------" + Style.RESET_ALL) print(Fore.GREEN + "Page URL: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{url}" + Style.RESET_ALL) print(Fore.GREEN + "Page title: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{title}" + Style.RESET_ALL) print(Fore.GREEN + "Found e-mails: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{', '.join(emails)}" + Style.RESET_ALL) @@ -130,7 +131,6 @@ def subdomains_parser(subdomains_list, report_folder, keywords, keywords_flag): except Exception as e: print(Fore.RED + "Error while getting detailed info on web resource. See journal for details") logging.error(f'WEB RESOURCE ADDITIONAL INFO GATHERING (PAGESEARCH): ERROR. REASON: {e}') - print(Fore.LIGHTGREEN_EX + "-------------------------------------------------" + Style.RESET_ALL) pass try: @@ -143,110 +143,36 @@ def subdomains_parser(subdomains_list, report_folder, keywords, keywords_flag): document_url = 'http://' + url + href print(Fore.GREEN + "Found document: " + Fore.LIGHTCYAN_EX + Style.BRIGHT + f"{document_url}" + Style.RESET_ALL) response = requests.get(document_url) + file_extensions = { + '.docx': 'extracted_{}.docx', + '.xlsx': 'extracted_{}.xlsx', + '.pdf': 'extracted_{}.pdf', + '.csv': 'extracted_{}.csv', + '.pptx': 'extracted_{}.pptx', + '.doc': 'extracted_{}.doc', + '.ppt': 'extracted_{}.ppt', + '.xls': 'extracted_{}.xls', + '.json': 'extracted_{}.json', + '.txt': 'extracted_{}.txt', + '.sql': 'extracted_{}.sql', + '.db': 'extracted_{}.db', + '.config': 'extracted_{}.config', + '.conf': 'extracted_{}.conf' + } if response.status_code == 200: - if href and href.lower().endswith('.docx'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.docx") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.xlsx'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.xlsx") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.pdf'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.pdf") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.csv'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.csv") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.pptx'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.pptx") - with open(extracted_path, 'wb') as file: - 
file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.doc'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.doc") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.ppt'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.ppt") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.xls'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.xls") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.json'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.json") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.txt'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.txt") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.sql'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.sql") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.db'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.db") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.config'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.config") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - elif href and href.lower().endswith('.conf'): - filename = os.path.basename(href) - extracted_path = os.path.join(ps_docs_path, f"extracted_{os.path.splitext(filename)[0]}.conf") - with open(extracted_path, 'wb') as file: - file.write(response.content) - files_counter += 1 - print(Fore.GREEN + "File was successfully saved") - print(Fore.LIGHTGREEN_EX + "-------------------------------------------------") + if href: + file_extension = os.path.splitext(href.lower())[1] + if file_extension in file_extensions: + filename = os.path.basename(href) + extracted_path = os.path.join(ps_docs_path, file_extensions[file_extension].format( + os.path.splitext(filename)[0])) + with open(extracted_path, 'wb') as file: + file.write(response.content) + files_counter += 1 + print(Fore.GREEN + "File was successfully saved") except Exception as 
e: print(Fore.RED + "This file can't be accessed to extract it. See journal for details") logging.error(f'FILES EXTRACTION (PAGESEARCH): ERROR. REASON: {e}') - print(Fore.LIGHTGREEN_EX + "-------------------------------------------------" + Style.RESET_ALL) pass ps_emails_list = [x for x in total_emails if x] @@ -265,15 +191,7 @@ def subdomains_parser(subdomains_list, report_folder, keywords, keywords_flag): print(Fore.RED + f"Can't find keywords. See journal for details") logging.error(f'KEYWORDS SEARCH IN PDF (PAGESEARCH): ERROR. REASON: {e}') pdf_with_keywords = 0 - print(Fore.LIGHTGREEN_EX + "-------------------------------------------------" + Style.RESET_ALL) - print(Fore.GREEN + f"\nDuring subdomains analysis:\n[+] Total {len(subdomains_list)} subdomains were checked") - print(Fore.GREEN + f"[+] Among them, {accessible_subdomains} subdomains were accessible") - print(Fore.GREEN + f"[+] In result, {len(ps_emails_return)} unique e-mail addresses were found") - print(Fore.GREEN + f"[+] Also, {files_counter} files were extracted") - print(Fore.GREEN + f"[+] Found {cookies_counter} cookies with values") - print(Fore.GREEN + f"[+] Found {api_keys_counter} API keys") - print(Fore.GREEN + f"[+] Found {website_elements_counter} different web page elements") - print(Fore.GREEN + f"[+] Found {exposed_passwords_counter} exposed passwords") + print_ps_cli_report(subdomains_list, accessible_subdomains, ps_emails_return, files_counter, cookies_counter, api_keys_counter, website_elements_counter, exposed_passwords_counter) if keywords_flag == 0: print(Fore.RED + "[+] Keywords were not gathered because of None user input") diff --git a/poetry.lock b/poetry.lock index 23e2488..fd28251 100644 --- a/poetry.lock +++ b/poetry.lock @@ -283,38 +283,38 @@ files = [ [[package]] name = "cryptography" -version = "43.0.1" +version = "43.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, ] [package.dependencies] @@ -327,7 +327,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -1348,13 +1348,13 @@ tinycss2 = ">=0.6.0" [[package]] name = "tinycss2" -version = "1.3.0" +version = "1.4.0" description = "A tiny CSS parser" optional = false python-versions = ">=3.8" files = [ - {file = "tinycss2-1.3.0-py3-none-any.whl", hash = "sha256:54a8dbdffb334d536851be0226030e9505965bb2f30f21a4a82c55fb2a80fae7"}, - {file = "tinycss2-1.3.0.tar.gz", hash = "sha256:152f9acabd296a8375fbca5b84c961ff95971fcfc32e79550c8df8e29118c54d"}, + {file = "tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289"}, + {file = "tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7"}, ] [package.dependencies] diff --git a/pyproject.toml b/pyproject.toml index 241f6a8..bbaa278 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "dpulse" -version = "1.1.2" +version = "1.1.3" description = "Convenient,fast and user-friendly collector of domain information from Open-Sources" authors = ["OSINT-TECHNOLOGIES