initial commit
commit d8a4f66b97
feeds/advisories.txt (Normal file, 7 lines)
@@ -0,0 +1,7 @@
https://isc.sans.edu/rssfeed_full.xml
https://us-cert.cisa.gov/ics/advisories/advisories.xml
https://us-cert.cisa.gov/ncas/analysis-reports.xml
https://www.cisecurity.org/feed/advisories
https://nao-sec.org/feed
https://www.ncsc.gov.uk/api/1/services/v1/report-rss-feed.xml
https://www.ncsc.gov.uk/api/1/services/v1/guidance-rss-feed.xml
feeds/alerts.txt (Normal file, 0 lines)
feeds/bugbounty.txt (Normal file, 20 lines)
@@ -0,0 +1,20 @@
https://medium.com/feed/bugbountywriteup/tagged/bug-bounty
https://blog.intigriti.com/feed/
https://github.blog/tag/bug-bounty/feed/
https://medium.com/feed/immunefi
https://ysamm.com/?feed=rss2
https://www.openbugbounty.org/blog/feed/
https://bugbounter.com/feed/
https://infosecwriteups.com/feed
https://blog.detectify.com/feed
https://portswigger.net/research/rss
https://blog.zsec.uk/feed
https://www.pmnh.site/index.xml
https://buer.haus/feed
https://blog.appsecco.com/feed
https://0xdf.gitlab.io/feed.xml
https://securitytrails.com/blog.rss
https://www.n00py.io/feed
https://itm4n.github.io/feed
https://spaceraccoon.dev/feed.xml
https://sploitus.com/rss
feeds/feeds.txt (Normal file, 157 lines)
@@ -0,0 +1,157 @@
https://feeds.feedburner.com/TheHackersNews?format=xml
https://www.grahamcluley.com/feed/
https://www.schneier.com/blog/atom.xml
http://krebsonsecurity.com/feed/
https://www.csoonline.com/feed/
https://www.darkreading.com/rss/all.xml
https://www.troyhunt.com/rss/
http://feeds.feedburner.com/eset/blog
https://www.infosecurity-magazine.com/rss/news/
https://www.jisasoftech.com/feed/
https://simeononsecurity.ch/index.xml
https://fidelissecurity.com/feed/
https://www.heroictec.com/feed/
https://cyberbuilders.substack.com/feed
https://www.infoguardsecurity.com/feed/
https://underdefense.com/feed/
https://medium.com/feed/@vaceituno
https://cyble.com/feed/
https://protegent360.com/blog/feed/
https://www.varutra.com/feed/
https://ventureinsecurity.net/feed
https://feeds.feedburner.com/mattpalmeroncybersecurity
https://medium.com/feed/@2ndsightlab
https://davinciforensics.co.za/cybersecurity/feed/
https://truefort.com/feed/
https://www.secops-blogger-newsl.com/blog-feed.xml
https://cybersecurity.att.com/site/blog-all-rss
https://blogs.cisco.com/security/feed
https://www.mcafee.com/blogs/feed/
https://www.nist.gov/blogs/cybersecurity-insights/rss.xml
http://feeds.trendmicro.com/TrendMicroResearch
https://www.bleepingcomputer.com/feed
http://www.techrepublic.com/rssfeeds/topic/security/?feedType=rssfeeds
https://www.computerworld.com/uk/category/security/index.rss
https://www.proofpoint.com/us/rss.xml
https://www.scnsoft.com/blog/category/information-security/atom
https://www.identityiq.com/feed/
https://blogs.quickheal.com/feed/
https://www.webroot.com/blog/feed/
https://blog.zonealarm.com/feed/
https://www.upguard.com/blog/rss.xml
https://www.seqrite.com/blog/feed/
https://blog.pcisecuritystandards.org/rss.xml
https://heimdalsecurity.com/blog/feed/
https://sectigostore.com/blog/feed/
http://securityaffairs.co/wordpress/feed
https://feeds.feedburner.com/govtech/blogs/lohrmann_on_infrastructure
https://www.itgovernance.co.uk/blog/category/cyber-security/feed
https://lab.wallarm.com/feed/
https://www.tripwire.com/state-of-security/feed/
https://www.secpod.com/blog/feed/
https://www.cheapsslshop.com/blog/feed/
https://www.cybertalk.org/feed/
https://secureblitz.com/feed/
https://www.binarydefense.com/feed/
https://www.cyberdefensemagazine.com/feed/
https://www.logpoint.com/en/feed/
https://cyberhoot.com/category/blog/feed/
https://socprime.com/feed/
https://hackercombat.com/feed/
https://www.pivotpointsecurity.com/feed/
https://www.clearnetwork.com/feed/
https://blog.securityinnovation.com/rss.xml
http://feeds.feedburner.com/GoogleOnlineSecurityBlog
https://www.blackfog.com/feed/
https://blog.entersoftsecurity.com/feed/
https://www.netsparker.com/blog/rss/
https://taosecurity.blogspot.com/feeds/posts/default?alt=rss
https://www.lastwatchdog.com/feed/
https://marcoramilli.com/feed/
https://mazebolt.com/feed/
https://binaryblogger.com/feed/
https://any.run/cybersecurity-blog/feed/
http://www.veracode.com/blog/feed/
https://www.helpnetsecurity.com/feed/
https://www.cm-alliance.com/cybersecurity-blog/rss.xml
https://www.vistainfosec.com/feed/
https://dataprivacymanager.net/feed/
https://wesecureapp.com/feed/
https://blog.g5cybersecurity.com/feed/
https://www.flyingpenguin.com/?feed=rss2
https://adamlevin.com/feed/
https://be4sec.com/feed/
https://www.erdalozkaya.com/feed/
https://thecybermaniacs.com/cm-blog/rss.xml
https://virtualattacks.com/feed/
https://cnsight.io/blog/feed/
https://www.idsalliance.org/feed/
https://www.exploitone.com/feed/
https://www.tsfactory.com/forums/blogs/category/infosec-digest/feed/
https://www.cyberpilot.io/cyberpilot-blog/rss.xml
https://www.ignyteplatform.com/feed/
https://www.canarytrap.com/feed/
https://www.secureblink.com/rss-feeds/threat-feed
https://www.virtru.com/blog/rss.xml
https://www.cybersecuritycloudexpo.com/feed/
https://www.theguardian.com/technology/data-computer-security/rss
https://threatpost.com/feed/
https://nakedsecurity.sophos.com/feed/
https://www.jbspeakr.cc/index.xml
https://personalprivacyonline.com/feed/
https://olukaiiisosicyber.tech/f.atom
https://olukaiiisoshybrideducyber.tech/feed/
https://internetsafetybrigade.org/our-blog/f.atom
https://medium.com/feed/@d0znpp
https://itsecuritycentral.teramind.co/feed/
https://trustarc.com/blog/feed/
https://www.acunetix.com/blog/feed/
https://blog.360quadrants.com/feed/
https://www.cyberdb.co/blog/feed/
https://privacysavvy.com/feed/
https://techtalk.pcmatic.com/feed/
https://www.twingate.com/blog.rss.xml
https://securityparrot.com/feed/
https://www.cybercrimeswatch.com/feed/
https://www.reveantivirus.com/blog/lan/en/feed
https://www.isdecisions.com/blog/feed/
https://www.lynxtechnologypartners.com/blog/feed/
https://www.getcurricula.com/feed
https://securitymadesimple.org/feed/
https://itega.org/feed/
https://zeno-sec.com/feed/
https://aghiathchbib.com/feed/
https://feeds.feedburner.com/ckdiii
https://blog.blackswansecurity.com/feed/
https://blog.infinigate.co.uk/rss.xml
https://gatefy.com/feed/
https://www.securedyou.com/feed/
https://www.empowerelearning.com/blog/feed/
https://blog.zartech.net/feed/
https://blog.itsecurityexpert.co.uk/feeds/posts/default?alt=atom
http://securityweekly.com/podcast/psw.xml
http://feeds.feedburner.com/high-tech_bridge_corporate_news
https://info-savvy.com/feed/
https://www.zerodayinitiative.com/rss/published/
https://cvefeed.io/rssfeed/newsroom.xml
https://cvefeed.io/rssfeed/severity/high.xml
https://cvefeed.io/rssfeed/latest.xml
https://www.zdnet.com/topic/security/rss.xml
https://www.wired.com/feed/category/security/latest/rss
http://www.malware-traffic-analysis.net/blog-entries.rss
https://www.intezer.com/blog/feed/
https://www.hackread.com/feed/
https://www.hackmageddon.com/feed/
https://www.hackerone.com/blog.rss
https://www.theguardian.com/technology/data-computer-security/rss
https://www.cio.com/category/security/index.rss
https://medium.com/feed/anton-on-security
https://arstechnica.com/tag/security/feed/
https://www.ncsc.gov.uk/api/1/services/v1/news-rss-feed.xml
https://www.ncsc.gov.uk/api/1/services/v1/blog-post-rss-feed.xml
https://www.xcitium.com/blog/rss
https://neoxnetworks.com/blog/rss
https://danielmiessler.com/rss
https://krebsonsecurity.com/rss
https://news.sophos.com/en-us/category/serious-security/rss
https://www.itsecurityguru.org/feed
lists/github_users.txt (Normal file, 9 lines)
@@ -0,0 +1,9 @@
https://github.com/EbookFoundation
https://github.com/witchdocsec
https://github.com/orgs/malectricasoftware/repositories
https://github.com/daffainfo
https://github.com/swisskyrepo
https://github.com/sundowndev
https://github.com/hak5
https://github.com/JohnHammond
https://github.com/infosecn1nja
newsbot.py (Normal file, 221 lines)
@@ -0,0 +1,221 @@
import feedparser
import requests
import time
from datetime import datetime, timedelta
from dateutil import parser
from typing import List, Dict
from bs4 import BeautifulSoup
import logging
import os
import re
import schedule
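
# Third-party packages assumed to be installed (pip names): feedparser, requests,
# python-dateutil, beautifulsoup4, schedule.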

# Webhook URLs
DEFAULT_WEBHOOK_URL = "WEBHOOK"  # Webhook for feeds/feeds.txt
ADVISORIES_WEBHOOK = "WEBHOOK"   # Webhook for feeds/advisories.txt
ALERTS_WEBHOOK = "WEBHOOK"       # Webhook for feeds/alerts.txt
BUGBOUNTY_WEBHOOK = "WEBHOOK"    # Webhook for feeds/bugbounty.txt
# File paths
PROCESSED_LINKS_FILE = 'processed_links.txt'  # File to store processed links
ADVISORIES = 'feeds/advisories.txt'  # File for advisory / threat intel feeds (common webhook)
FEEDS_FILE = 'feeds/feeds.txt'  # File for regular feeds (different webhook)
ALERTS = "feeds/alerts.txt"
BUGBOUNTY = "feeds/bugbounty.txt"

# Set up logging
logging.basicConfig(filename='rss_feed_watcher.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Helper function to send an embed message to Discord
def send_discord_embed(title: str, link: str, published: datetime, description: str, webhook_url: str):
    embed = {
        "embeds": [{
            "title": title,
            "url": link,
            "description": description,
            "color": 5814783,  # Hex color code for the embed (optional)
            "timestamp": published.isoformat()  # ISO format timestamp
        }]
    }
    try:
        response = requests.post(webhook_url, json=embed)
        if response.status_code != 204:
            logging.error(f"Failed to send message to Discord: {response.status_code} {response.text}")
    except requests.exceptions.RequestException as e:
        logging.error(f"Exception while sending message to Discord: {e}")

# Helper function to clean HTML and extract plain text
def clean_html(html_content: str) -> str:
    soup = BeautifulSoup(html_content, 'html.parser')
    text = soup.get_text(separator='\n').strip()
    text = re.sub(r'\n+', ' ', text)
    text = re.sub(r'(read more|continue reading|more details|[ \t]*\n[ \t]*\n[ \t]*)', '', text, flags=re.IGNORECASE)
    return text.strip()

# Helper function to get posts from RSS feed
def get_recent_posts(feed_url: str, webhook_url: str, since_time: datetime, processed_links: set) -> List[dict]:
    try:
        response = requests.get(feed_url, timeout=10)
        response.raise_for_status()  # Raise an error for bad HTTP responses
        feed = feedparser.parse(response.content)
        recent_posts = []

        for entry in feed.entries:
            # Try to get and parse the published date from the entry
            published = getattr(entry, 'published', None)
            if published:
                try:
                    published_time = parser.parse(published).replace(tzinfo=None)
                except (ValueError, TypeError):
                    logging.warning(f"Skipping entry with invalid published date: {published}")
                    continue
            else:
                logging.warning("Skipping entry without published date")
                continue

            if published_time > since_time:
                link = entry.link
                if link in processed_links:
                    continue  # Skip already processed posts

                description = getattr(entry, 'description', '')
                plain_text_description = clean_html(description)

                recent_posts.append({
                    'title': entry.title,
                    'link': link,
                    'published': published_time,
                    'description': plain_text_description
                })

                # Add link to the set of processed links
                processed_links.add(link)

                # Send an embed message to Discord for each new post
                send_discord_embed(entry.title, link, published_time, plain_text_description, webhook_url)

        return recent_posts
    except requests.exceptions.Timeout as e:
        logging.error(f"Request timeout for {feed_url}: {e}")
    except requests.exceptions.ConnectionError as e:
        logging.error(f"Connection error for {feed_url}: {e}")
    except requests.exceptions.RequestException as e:
        logging.error(f"HTTP request failed for {feed_url}: {e}")

# Load processed links from file
def load_processed_links() -> set:
    if os.path.exists(PROCESSED_LINKS_FILE):
        with open(PROCESSED_LINKS_FILE, 'r') as file:
            return set(line.strip() for line in file)
    return set()

# Save processed links to file
def save_processed_links(processed_links: set):
    with open(PROCESSED_LINKS_FILE, 'w') as file:
        for link in processed_links:
            file.write(f"{link}\n")

# Helper function to load URLs from a file (feed or threat intel)
def load_feed_urls(file_path: str) -> List[str]:
    if os.path.exists(file_path):
        with open(file_path, 'r') as file:
            return [line.strip() for line in file if line.strip()]
    logging.error(f"{file_path} not found.")
    return []

# Function to process threat intel feeds
def process_threatintel_feeds(threatintel_feeds: List[str], since_time: datetime, processed_links: set):
    for feed_url in threatintel_feeds:
        logging.info(f"Checking threat intel feed: {feed_url}")
        recent_posts = get_recent_posts(feed_url, ADVISORIES_WEBHOOK, since_time, processed_links)
        try:
            for post in recent_posts:
                logging.info(f"New threat intel post: {post['title']}")
                logging.info(f"Link: {post['link']}")
                logging.info(f"Published: {post['published']}")
                logging.info(f"Description: {post['description']}")
                logging.info("-" * 40)
        except TypeError:
            pass

# Function to process regular feeds
def process_regular_feeds(regular_feeds: List[str], since_time: datetime, processed_links: set):
    for feed_url in regular_feeds:
        logging.info(f"Checking regular feed: {feed_url}")
        recent_posts = get_recent_posts(feed_url, DEFAULT_WEBHOOK_URL, since_time, processed_links)
        try:
            for post in recent_posts:
                logging.info(f"New regular post: {post['title']}")
                logging.info(f"Link: {post['link']}")
                logging.info(f"Published: {post['published']}")
                logging.info(f"Description: {post['description']}")
                logging.info("-" * 40)
        except TypeError:
            pass

# Function to process alert feeds
def process_alert_feeds(alert_feeds: List[str], since_time: datetime, processed_links: set):
    for feed_url in alert_feeds:
        logging.info(f"Checking alert feed: {feed_url}")
        recent_posts = get_recent_posts(feed_url, ALERTS_WEBHOOK, since_time, processed_links)
        try:
            for post in recent_posts:
                logging.info(f"New alert post: {post['title']}")
                logging.info(f"Link: {post['link']}")
                logging.info(f"Published: {post['published']}")
                logging.info(f"Description: {post['description']}")
                logging.info("-" * 40)
        except TypeError:
            pass

# Function to process bug bounty feeds
def process_bugbounty_feeds(bugbounty_feeds: List[str], since_time: datetime, processed_links: set):
    for feed_url in bugbounty_feeds:
        logging.info(f"Checking bug bounty feed: {feed_url}")
        recent_posts = get_recent_posts(feed_url, BUGBOUNTY_WEBHOOK, since_time, processed_links)
        try:
            for post in recent_posts:
                logging.info(f"New bug bounty post: {post['title']}")
                logging.info(f"Link: {post['link']}")
                logging.info(f"Published: {post['published']}")
                logging.info(f"Description: {post['description']}")
                logging.info("-" * 40)
        except TypeError:
            pass

# Main function to run the watcher
def rss_feed_watcher():
    print("RUNNING...")
    processed_links = load_processed_links()  # Load previously processed links
    regular_feeds = load_feed_urls(FEEDS_FILE)
    # Load threat intel, bug bounty, and alert feeds
    threatintel_feeds = load_feed_urls(ADVISORIES)
    bug_bounty_feeds = load_feed_urls(BUGBOUNTY)
    alert_feeds = load_feed_urls(ALERTS)

    # Get the timestamp to compare recent posts (last 12 hours)
    since_time = datetime.now() - timedelta(hours=12)
    since_time = since_time.replace(tzinfo=None)
    print("going over bug bounties...")
    process_bugbounty_feeds(bug_bounty_feeds, since_time, processed_links)
    print("going over regular feeds...")
    process_regular_feeds(regular_feeds, since_time, processed_links)
    # Process threat intel feeds
    print("going over threat intel...")
    process_threatintel_feeds(threatintel_feeds, since_time, processed_links)
    print("going over alerts...")
    process_alert_feeds(alert_feeds, since_time, processed_links)

    # Save updated processed links
    save_processed_links(processed_links)

# Schedule the RSS feed watcher to run every hour
def schedule_rss_watcher():
    schedule.every(1).hours.do(rss_feed_watcher)
    logging.info("RSS Feed Watcher scheduled to run every hour.")

    while True:
        schedule.run_pending()
        time.sleep(1)

if __name__ == "__main__":
    rss_feed_watcher()
    # Start the scheduled watcher
    schedule_rss_watcher()
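
# To run the bot once and then keep polling hourly (assuming the webhook constants above
# are filled in with real Discord webhook URLs):
#   python newsbot.py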
test2.py (Normal file, 263 lines)
@@ -0,0 +1,263 @@
import requests
import time
import schedule
from datetime import datetime, timedelta
import json
import os
import re

# Global variable to store previously fetched CVEs and their last modified timestamps
previous_cves = {}  # Dictionary to store CVE IDs and their lastModified timestamps
FILE_PATH = 'previous_cves.json'

def load_previous_cves():
    """Load previously fetched CVEs from a file."""
    global previous_cves
    previous_cves = {}
    if os.path.exists(FILE_PATH):
        with open(FILE_PATH, 'r') as file:
            try:
                # Load JSON data from the file
                data = json.load(file)

                # Ensure the data is a list of dictionaries
                if isinstance(data, list):
                    previous_cves = { (cve.get('id'), cve.get('lastModified', 'Not available')): cve.get('lastModified', 'Not available') for cve in data if 'id' in cve }
                else:
                    print("Error: JSON data is not in the expected format.")
            except json.JSONDecodeError:
                print("Error: Failed to decode JSON from the file.")

def save_previous_cves():
    """Save previously fetched CVEs to a file."""
    with open(FILE_PATH, 'w') as file:
        # Convert the dictionary to a list of dictionaries for saving
        data_to_save = [{'id': cve_id, 'lastModified': last_modified} for (cve_id, last_modified), _ in previous_cves.items()]
        json.dump(data_to_save, file, indent=2)
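
# previous_cves.json therefore ends up holding a list of records like
# [{"id": "CVE-2024-12345", "lastModified": "2024-05-01T10:00:00.000"}, ...] (values illustrative).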

def send_webhook(embed_data):
    """Send an embed to a webhook URL."""
    webhook_url = ''  # Replace with your webhook URL
    try:
        response = requests.post(webhook_url, json={'embeds': [embed_data]})
        response.raise_for_status()
        print(f"Webhook sent successfully: {response.status_code}")
    except requests.RequestException as e:
        print(f"Error sending webhook: {e}")

def format_embed(cve):
    """Format CVE data into an embed."""
    # Extract CVE ID
    cve_id = cve.get('cve', {}).get('id', 'Unknown CVE ID')

    # Get the English description, or fallback to 'No description available'
    description = next(
        (desc.get('value') for desc in cve.get('cve', {}).get('descriptions', []) if desc.get('lang') == 'en'),
        'No description available'
    )

    # Fetch CVSS data
    cvss_metrics = cve.get('cve', {}).get('metrics', {}).get('cvssMetricV31', [])

    # Check if NIST data is available
    nist_data = next((metric for metric in cvss_metrics if metric.get('source') == 'nvd@nist.gov'), None)

    # Use NIST data if available, otherwise fallback to other available metrics
    if nist_data:
        cvss_data = nist_data.get('cvssData', {})
    else:
        cvss_data = cvss_metrics[0].get('cvssData', {}) if cvss_metrics else {}

    # Extract CVSS-related fields
    base_score = cvss_data.get('baseScore', 'Not available')
    base_severity = cvss_data.get('baseSeverity', 'Not available')
    attack_vector = cvss_data.get('attackVector', 'Not available')
    attack_complexity = cvss_data.get('attackComplexity', 'Not available')
    privileges_required = cvss_data.get('privilegesRequired', 'Not available')
    user_interaction = cvss_data.get('userInteraction', 'Not available')
    vector_string = cvss_data.get('vectorString', 'Not available')

    # Set color based on base severity
    severity_colors = {
        'LOW': 0x00FF00,      # Green for Low severity
        'MEDIUM': 0xFFFF00,   # Yellow for Medium severity
        'HIGH': 0xFF0000,     # Red for High severity
        'CRITICAL': 0xFF0000  # Red for Critical severity (added if needed)
    }
    color = severity_colors.get(base_severity, 0x0000FF)  # Default to blue if severity is unknown

    # Extract and format dates
    published_date = cve.get('cve', {}).get('published', 'Not available')
    last_modified_date = cve.get('cve', {}).get('lastModified', 'Not available')

    try:
        published_date = datetime.fromisoformat(published_date).strftime('%Y-%m-%d %H:%M:%S UTC')
        last_modified_date = datetime.fromisoformat(last_modified_date).strftime('%Y-%m-%d %H:%M:%S UTC')
    except ValueError:
        pass

    # Check if the system is vulnerable from configurations
    configurations = cve.get('cve', {}).get('configurations', [])
    vulnerable_criteria = []

    for config in configurations:
        for node in config.get('nodes', []):
            for cpe_match in node.get('cpeMatch', []):
                if cpe_match.get('vulnerable', False):
                    vulnerable_criteria.append(cpe_match.get('criteria', 'N/A'))

    # Collect references, if any exist
    references = [ref.get('url') for ref in cve.get('cve', {}).get('references', []) if ref.get('url')]

    # Create the embed structure
    embed = {
        'title': f'CVE Details: {cve_id}',
        'description': description,
        'color': color,
        'fields': [
            {
                'name': 'CVSS Score',
                'value': str(base_score) if base_score != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'CVSS Severity',
                'value': base_severity if base_severity != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'Attack Vector',
                'value': attack_vector if attack_vector != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'Attack Complexity',
                'value': attack_complexity if attack_complexity != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'Privileges Required',
                'value': privileges_required if privileges_required != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'User Interaction',
                'value': user_interaction if user_interaction != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'CVSS Vector',
                'value': vector_string if vector_string != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'Published Date',
                'value': published_date if published_date != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'Last Modified Date',
                'value': last_modified_date if last_modified_date != 'Not available' else 'Not available',
                'inline': True
            },
            {
                'name': 'Vulnerable Systems',
                'value': '\n'.join(vulnerable_criteria) if vulnerable_criteria else 'No vulnerable systems specified',
                'inline': False
            }
        ],
        'footer': {
            'text': 'Source: NVD'
        },
        'url': f'https://nvd.nist.gov/vuln/detail/{cve_id}'
    }

    # Add references if any exist
    if references:
        embed['fields'].append({
            'name': 'References',
            'value': '\n'.join(references),
            'inline': False
        })

    return embed

def fetch_cves():
    global previous_cves

    # Current time for the request
    now = datetime.utcnow()
    start_date = (now - timedelta(days=7)).strftime('%Y-%m-%dT%H:%M:%S.000')
    end_date = now.strftime('%Y-%m-%dT%H:%M:%S.000')

    cvs = ["LOW", "MEDIUM", "HIGH"]

    # Construct the request URL with the updated date range
    for severity in cvs:
        url = f"https://services.nvd.nist.gov/rest/json/cves/2.0/?pubStartDate={start_date}&pubEndDate={end_date}&cvssV3Severity={severity}"

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
        }

        retry_attempts = 3
        for attempt in range(retry_attempts):
            try:
                # Make the request to the NVD API
                response = requests.get(url, headers=headers)
                response.raise_for_status()
                data = response.json()

                # Debug: Print raw response
                #print("Raw response data:")
                #print(json.dumps(data, indent=2))  # Pretty-print the JSON response

                # Extract CVEs
                cves = data.get('vulnerabilities', [])

                # Debug: Print number of CVEs found
                print(f"Number of CVEs found: {len(cves)}")

                # Identify new or updated CVEs
                new_or_updated_cves = []
                for cve in cves:
                    cve_id = cve['cve']['id']
                    last_modified = cve['cve'].get('lastModified', 'Not available')

                    if (cve_id, last_modified) not in previous_cves:
                        new_or_updated_cves.append(cve)

                if new_or_updated_cves:
                    print(f"Found {len(new_or_updated_cves)} new or updated CVEs:")
                    for cve in new_or_updated_cves:
                        # Format the CVE data as an embed
                        embed = format_embed(cve)
                        # Send the embed via webhook
                        send_webhook(embed)

                        # Update the dictionary of previously fetched CVEs
                        previous_cves[(cve['cve']['id'], cve['cve'].get('lastModified', 'Not available'))] = cve['cve'].get('lastModified', 'Not available')

                    save_previous_cves()

                else:
                    print("No new or updated CVEs found.")

                # Exit the retry loop if successful
                break

            except requests.RequestException as e:
                print(f"Error fetching CVEs (attempt {attempt + 1}/{retry_attempts}): {e}")
                # Wait before retrying
                time.sleep(5)

# Load previously fetched CVEs from file
load_previous_cves()

# Schedule the task to run every hour
schedule.every(1).hours.do(fetch_cves)
fetch_cves()
print("Starting CVE monitoring...")
while True:
    schedule.run_pending()
    time.sleep(1)
youtubes.txt (Normal file, 80 lines)
@@ -0,0 +1,80 @@
https://www.youtube.com/c/InsiderPhD
https://www.youtube.com/c/RanaKhalil101
https://www.youtube.com/c/SpinTheHack
https://www.youtube.com/c/PwnFunction
https://www.youtube.com/c/CyberSecVillage
https://www.youtube.com/c/StefanRows
https://www.youtube.com/c/ITSecurityLabs
https://www.youtube.com/c/Cybrcom
https://www.youtube.com/c/TheXSSrat
https://www.youtube.com/c/CristiVladZ
https://www.youtube.com/c/HackerOneTV
https://www.youtube.com/c/PinkDraconian
https://www.youtube.com/c/ForensicTech
https://www.youtube.com/c/hak5
https://www.youtube.com/c/TheCyberMentor
https://www.youtube.com/c/NullByteWHT
https://www.youtube.com/c/HackerSploit
https://www.youtube.com/c/STOKfredrik
https://www.youtube.com/c/ippsec
https://www.youtube.com/c/ScriptKiddieHub
https://www.youtube.com/c/zSecurity
https://www.youtube.com/c/JonGoodCyber
https://www.youtube.com/c/CybersecurityWeb
https://www.youtube.com/c/MotasemHamdaninfosec
https://www.youtube.com/c/ITCareerQuestions
https://www.youtube.com/c/BugBountyReportsExplained
https://www.youtube.com/c/TechChipNet
https://www.youtube.com/c/TechnicalNavigator
https://www.youtube.com/c/BeauKnowsTechStuff
https://www.youtube.com/c/CyberSecurityTV
https://www.youtube.com/c/indianblackhats
https://www.youtube.com/c/DavidBombal
https://www.youtube.com/c/Nahamsec
https://www.youtube.com/c/LoiLiangYang
https://www.youtube.com/c/professormesser
https://www.youtube.com/c/NetworkChuck
https://www.youtube.com/c/JohnHammond010
https://www.youtube.com/c/DarkSec
https://www.youtube.com/c/CryptoCat23
https://www.youtube.com/c/devnull1337
https://www.youtube.com/c/Seytonic
https://www.youtube.com/c/LiveOverflow
https://www.youtube.com/c/SecurityFWD
https://www.youtube.com/c/TheHeraneVlogs5
https://www.youtube.com/c/FindingUrPasswd
https://www.youtube.com/c/BittenTech
https://www.youtube.com/c/CyberAcademyHindi
https://www.youtube.com/c/Cyberspatial
https://www.youtube.com/c/GynvaelEN
https://www.youtube.com/c/SheshTheCyberSecurity
https://www.youtube.com/c/impratikdabhi
https://www.youtube.com/c/MrTurvey
https://www.youtube.com/c/Bugcrowd
https://www.youtube.com/c/BlackPerl
https://www.youtube.com/c/MurmusCTF
https://www.youtube.com/c/PentestToolscom
https://www.youtube.com/c/TheHackersWorld
https://www.youtube.com/c/BlackHatOfficialYT
https://www.youtube.com/c/InfiniteLogins
https://www.youtube.com/c/HackingSimplifiedAS
https://www.youtube.com/c/ZetaTwo
https://www.youtube.com/c/EhackingNet
https://www.youtube.com/c/MastersinIT
https://www.youtube.com/c/InfoCk
https://www.youtube.com/c/CyberInsecurity
https://www.youtube.com/c/troyhuntdotcom
https://www.youtube.com/c/Tech69YT
https://www.youtube.com/c/CloudSecurityPodcast
https://www.youtube.com/c/HusseinNasser-software-engineering
https://www.youtube.com/@DEFCONConference
https://www.youtube.com/@BlackHatOfficialYT
https://www.youtube.com/@thenewboston
https://www.youtube.com/@PentesterAcademyTV
https://www.youtube.com/@STOKfredrik
https://www.youtube.com/@InfoSecPat
https://www.youtube.com/channel/UCo1NHk_bgbAbDBc4JinrXww
https://www.youtube.com/@Malectrica
https://www.youtube.com/@NahamSec
https://www.youtube.com/@LowLevelTV
https://www.youtube.com/@Fireship
ytmonitor.py (Normal file, 162 lines)
@@ -0,0 +1,162 @@
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from datetime import datetime, timedelta
import time
import re
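
# Assumes the selenium package plus a local Chrome install whose driver Selenium can resolve
# (recent Selenium releases locate a matching chromedriver automatically); channel pages are
# scraped with a real browser session rather than the YouTube API.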

# Set your Discord webhook URL here
DISCORD_WEBHOOK_URL = ''  # Replace with your actual webhook URL
SEEN_VIDEOS_FILE = 'seen_videos.txt'  # File to keep track of seen video IDs

options = Options()
#options.add_argument("--headless")  # Uncomment for headless mode
options.add_argument('--no-sandbox')  # Disable sandbox
options.add_argument('--disable-dev-shm-usage')  # Avoid /dev/shm size issues
options.add_argument('--disable-gpu')  # Disable GPU if running into GPU issues
options.add_experimental_option("prefs", {"profile.default_content_setting_values.notifications": 1})
options.add_argument("--disable-infobars")


CHECK_INTERVAL = 60*60*1  # Check every hour (3600 seconds)
seen_videos = set()  # To keep track of seen video IDs

def load_seen_videos():
    """Load seen video IDs from a file."""
    try:
        with open(SEEN_VIDEOS_FILE, 'r') as file:
            return set(line.strip() for line in file if line.strip())
    except FileNotFoundError:
        return set()

def save_seen_video(video_id):
    """Save a video ID to the seen videos file."""
    with open(SEEN_VIDEOS_FILE, 'a') as file:
        file.write(f"{video_id}\n")

def send_discord_notification(video_title, video_link, upload_time, thumbnail_url, creator_name):
    embed = {
        "title": video_title,
        "url": video_link,
        "description": f"Creator: {creator_name}\nUploaded on: {upload_time.strftime('%Y-%m-%d %H:%M:%S')}",
        "thumbnail": {"url": thumbnail_url},
        "color": 16711680
    }
    data = {
        "embeds": [embed]
    }
    requests.post(DISCORD_WEBHOOK_URL, json=data)

def parse_time(upload_time_str):
    """Convert the upload time string to a datetime object."""
    time_units = {
        'years': 0,
        'months': 0,
        'weeks': 0,
        'days': 0,
        'hours': 0,
        'minutes': 0,
        'seconds': 0
    }

    time_pattern = re.compile(r'(\d+)\s*(year|years|month|months|week|weeks|day|days|hour|hours|minute|minutes|second|seconds)')

    for match in time_pattern.finditer(upload_time_str):
        value, unit = match.groups()
        value = int(value)

        if 'year' in unit:
            time_units['years'] += value
        elif 'month' in unit:
            time_units['months'] += value
        elif 'week' in unit:
            time_units['weeks'] += value
        elif 'day' in unit:
            time_units['days'] += value
        elif 'hour' in unit:
            time_units['hours'] += value
        elif 'minute' in unit:
            time_units['minutes'] += value
        elif 'second' in unit:
            time_units['seconds'] += value

    # Convert months and years to days
    total_days = (time_units['years'] * 365) + (time_units['months'] * 30) + time_units['days']

    # Create a timedelta
    total_time = timedelta(
        days=total_days,
        weeks=time_units['weeks'],
        hours=time_units['hours'],
        minutes=time_units['minutes'],
        seconds=time_units['seconds']
    )

    # Return the correct upload time
    return datetime.now() - total_time
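
# Example: parse_time("3 weeks ago") yields roughly datetime.now() - timedelta(weeks=3);
# months and years are approximated as 30 and 365 days respectively.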

def fetch_latest_videos(channel_url, driver):
    driver.get(channel_url)
    time.sleep(3)  # Wait for the page to load

    # Extract the creator name from the <title> tag
    creator_name = driver.title.replace(" - YouTube", "")

    # Click "Allow Cookies" if the button is present
    try:
        cookie_button = driver.find_element(By.CSS_SELECTOR, "button.VfPpkd-LgbsSe.VfPpkd-LgbsSe-OWXEXe-k8QpJ.VfPpkd-LgbsSe-OWXEXe-dgl2Hf.nCP5yc.AjY5Oe.DuMIQc.Gu558e")
        cookie_button.click()
    except Exception:
        pass

    # Find all video elements
    video_elements = driver.find_elements(By.CSS_SELECTOR, 'ytd-rich-grid-media')
    new_videos = []

    for video in video_elements:
        title_element = video.find_element(By.ID, 'video-title-link')
        upload_time_element = video.find_elements(By.CSS_SELECTOR, 'span.inline-metadata-item')[-1]  # Get the last one for upload time
        thumbnail_element = video.find_element(By.CSS_SELECTOR, 'img.yt-core-image')

        video_id = title_element.get_attribute('href').split('v=')[-1]
        if video_id not in seen_videos:
            video_title = title_element.get_attribute('title')
            upload_time_str = upload_time_element.text
            thumbnail_url = thumbnail_element.get_attribute('src')

            # Get upload time
            upload_time = parse_time(upload_time_str)

            # Check if the video was uploaded in the last 7 days
            if datetime.now() - upload_time < timedelta(days=7):
                new_videos.append((video_title, title_element.get_attribute('href'), upload_time, thumbnail_url, creator_name))
                seen_videos.add(video_id)
                save_seen_video(video_id)

    return new_videos

def main():
    global seen_videos
    seen_videos = load_seen_videos()
    driver = None
    try:
        with open('youtubes.txt', 'r') as file:
            channels = [line.strip() for line in file if line.strip()]

        while True:
            driver = webdriver.Chrome(options=options)
            for channel in channels:
                channel_url = f"{channel}/videos"
                new_videos = fetch_latest_videos(channel_url, driver)
                for video_title, video_link, upload_time, thumbnail_url, creator_name in new_videos:
                    print(f"New video found: {video_title}\nLink: {video_link}\nUploaded on: {upload_time}\nThumbnail: {thumbnail_url}\nCreator: {creator_name}")
                    send_discord_notification(video_title, video_link, upload_time, thumbnail_url, creator_name)  # Notify
            driver.quit()
            time.sleep(CHECK_INTERVAL)
    finally:
        if driver is not None:
            driver.quit()


if __name__ == "__main__":
    main()