author     filip <filip.rabiega@gmail.com>  2025-04-26 13:13:01 +0200
committer  filip <filip.rabiega@gmail.com>  2025-04-26 13:13:01 +0200
commit     cd08dcdd71b846c1639bdb5647cdfdc4eded49fd (patch)
tree       1125ddb458495a4a1d0452e386c554bec6742649
parent     cdbef4e091c9dfbbc7c93cdfc8205be30141b2a0 (diff)
added chadscrapper-seo.py and backlinkchecker.py and modified chadcrawler.py
-rw-r--r--  backlinkchecker.py    22
-rw-r--r--  chadcrawler.py       103
-rw-r--r--  chadscrapper-seo.py   89
3 files changed, 196 insertions(+), 18 deletions(-)
diff --git a/backlinkchecker.py b/backlinkchecker.py
new file mode 100644
index 0000000..45076b0
--- /dev/null
+++ b/backlinkchecker.py
@@ -0,0 +1,22 @@
+import requests
+from bs4 import BeautifulSoup
+
+DOMAIN = "example.com"
+query = f"link:{DOMAIN}"
+url = f"https://www.google.com/search?q={query}"
+
+# Proxy setup (Replace with a working proxy)
+proxies = {
+ "http": "http://your_proxy:port",
+ "https": "https://your_proxy:port"
+}
+
+headers = {"User-Agent": "Mozilla/5.0"}
+
+response = requests.get(url, headers=headers, proxies=proxies)
+soup = BeautifulSoup(response.text, "html.parser")
+
+for link in soup.find_all("a"):
+ href = link.get("href")
+ if "http" in href:
+ print(href)
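
Note on backlinkchecker.py: Google has long since deprecated the link: search operator, so this query may return few or no organic results, and the anchors it does return are often wrapped in /url?q=... redirects rather than pointing at the linking pages directly. A minimal sketch for unwrapping those redirects follows; it is not part of this commit, and the /url?q= markup is an assumption about Google's HTML results, which change frequently.

# unwrap_redirect.py -- hypothetical companion snippet, not part of this commit.
# Google's HTML results often wrap targets as /url?q=<target>&...; this pulls
# the target out when that pattern is present and passes other hrefs through.
from urllib.parse import parse_qs, urlparse

def unwrap_google_redirect(href):
    parsed = urlparse(href)
    if parsed.path == "/url":
        target = parse_qs(parsed.query).get("q")
        if target:
            return target[0]
    return href

print(unwrap_google_redirect("https://www.google.com/url?q=https://example.org/page&sa=U"))
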
diff --git a/chadcrawler.py b/chadcrawler.py
index cf39e92..a7f4290 100644
--- a/chadcrawler.py
+++ b/chadcrawler.py
@@ -2,40 +2,107 @@ import requests
from bs4 import BeautifulSoup
import csv
import time
+import os
+import argparse
+import concurrent.futures
+from urllib.parse import urljoin, urlparse, urldefrag
-visited_urls = set()
-csv_filename = "crawled_data.csv"
-def crawl(url, depth=2):
+def normalize_url(url):
+ """Remove fragments and normalize URLs."""
+ return urldefrag(urljoin(base_url, url)).url.strip()
+
+
+def crawl(url, depth):
+ """Recursively crawl webpages with multithreading."""
if depth == 0 or url in visited_urls:
return
+
try:
- response = requests.get(url, timeout=5)
+ response = session.get(url, timeout=5)
response.raise_for_status()
- except requests.RequestException:
+ except requests.RequestException as e:
+ print(f"Error accessing {url}: {e}")
return
visited_urls.add(url)
soup = BeautifulSoup(response.text, "html.parser")
- # Extract title and all links
- title = soup.title.string if soup.title else "No Title"
- links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].startswith("http")]
+ # Extract more data
+    title = soup.title.string.strip() if soup.title and soup.title.string else "No Title"
+    description = soup.find('meta', attrs={'name': 'description'})
+    description = description['content'].strip() if description and description.get('content') else "No description"
+ h1 = soup.find('h1')
+ h1_text = h1.text.strip() if h1 else "No H1"
+
+ # Extract images
+ images = [img['src'] for img in soup.find_all('img', src=True)]
+ images = ", ".join(set(normalize_url(img) for img in images))
+
+ # Extract additional headings
+ h2_tags = [h2.text.strip() for h2 in soup.find_all('h2')]
+ h2_text = ", ".join(h2_tags) if h2_tags else "No H2"
+
+ # Save more data to CSV
+ save_to_csv([url, title, description, h1_text, h2_text, images])
- # Save to CSV
- save_to_csv([url, title])
+ # Find valid links
+ links = set(
+ normalize_url(a['href']) for a in soup.find_all('a', href=True)
+ if is_valid_url(a['href'])
+ )
+
+ # Multithreading for crawling next links
+ with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
+ executor.map(lambda link: crawl(link, depth - 1), links)
+
+ time.sleep(1) # Avoid excessive requests
+
+
+def is_valid_url(link):
+ """Ensure the link is absolute and within the same domain."""
+ parsed_link = urlparse(normalize_url(link))
+ return parsed_link.scheme in {"http", "https"} and parsed_link.netloc == parsed_base.netloc
- # Crawl next links
- for link in links:
- crawl(link, depth - 1)
- time.sleep(1) # Delay to avoid overloading the server
def save_to_csv(data):
+ """Append data to CSV file."""
+ file_exists = os.path.isfile(csv_filename)
with open(csv_filename, "a", newline="", encoding="utf-8") as file:
writer = csv.writer(file)
+ if not file_exists:
+ writer.writerow(["URL", "Title", "Description", "H1", "H2", "Images"])
writer.writerow(data)
-# Start crawling
-start_url = "https://example.com"
-crawl(start_url)
-print("Crawling finished. Data saved in", csv_filename)
+
+def main():
+ parser = argparse.ArgumentParser(description="Multithreaded Web Crawler")
+ parser.add_argument("start_url", help="URL to start crawling from")
+ parser.add_argument("--depth", type=int, default=2, help="Crawling depth")
+ parser.add_argument("--output", default="crawled_data.csv", help="Output CSV filename")
+ args = parser.parse_args()
+
+ global visited_urls, session, base_url, parsed_base, csv_filename
+ visited_urls = set()
+ session = requests.Session()
+ session.headers.update({"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"})
+ csv_filename = args.output
+
+ # Extract base URL for resolving relative links
+ base_url = "{0.scheme}://{0.netloc}".format(urlparse(args.start_url))
+ parsed_base = urlparse(base_url)
+
+ print(f"Starting crawl at {args.start_url} with depth {args.depth}")
+ print(f"Results will be saved to {csv_filename}")
+
+ # Initialize CSV file
+ with open(csv_filename, "w", newline="", encoding="utf-8") as file:
+ writer = csv.writer(file)
+ writer.writerow(["URL", "Title", "Description", "H1", "H2", "Images"])
+
+ crawl(args.start_url, args.depth)
+ print(f"Crawling finished. Visited {len(visited_urls)} pages. Data saved in {csv_filename}")
+
+
+if __name__ == "__main__":
+ main()
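
The reworked chadcrawler.py is now driven from the command line (python chadcrawler.py https://example.com --depth 2 --output crawled_data.csv). It can also be invoked programmatically by populating sys.argv before calling main(); a minimal sketch, assuming the script is importable as chadcrawler and using a hypothetical driver file name:

# run_crawler.py -- hypothetical driver; equivalent to running
#   python chadcrawler.py https://example.com --depth 2 --output crawled_data.csv
import sys

import chadcrawler

sys.argv = ["chadcrawler.py", "https://example.com", "--depth", "2", "--output", "crawled_data.csv"]
chadcrawler.main()
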
diff --git a/chadscrapper-seo.py b/chadscrapper-seo.py
new file mode 100644
index 0000000..7786eab
--- /dev/null
+++ b/chadscrapper-seo.py
@@ -0,0 +1,89 @@
+import aiohttp
+import asyncio
+import pandas as pd
+from bs4 import BeautifulSoup
+from urllib.parse import urljoin, urlparse
+from tqdm import tqdm
+
+HEADERS = {"User-Agent": "Mozilla/5.0"}
+
+async def fetch_url(session, url):
+ """Fetches HTML content asynchronously."""
+ try:
+ async with session.get(url, headers=HEADERS, timeout=10) as response:
+ response.raise_for_status()
+ return await response.text()
+ except Exception as e:
+ print(f"Failed to fetch {url}: {e}")
+ return None
+
+def parse_html(url, html):
+ """Parses HTML and extracts SEO data."""
+ soup = BeautifulSoup(html, "html.parser")
+
+    title = soup.title.string.strip() if soup.title and soup.title.string else "N/A"
+
+    def get_meta_content(name):
+        tag = soup.find("meta", attrs={"name": name}) or soup.find("meta", attrs={"property": name})
+        return tag["content"].strip() if tag and tag.get("content") else "N/A"
+
+ meta_desc = get_meta_content("description")
+ meta_keywords = get_meta_content("keywords")
+
+ # Extracting Open Graph & Twitter Card Data
+ og_title = get_meta_content("og:title")
+ og_desc = get_meta_content("og:description")
+ twitter_title = get_meta_content("twitter:title")
+ twitter_desc = get_meta_content("twitter:description")
+
+ # Extract Canonical URL
+ canonical_tag = soup.find("link", rel="canonical")
+    canonical_url = canonical_tag["href"].strip() if canonical_tag and canonical_tag.get("href") else url
+
+ # Extract Headings
+ headings = {f"H{i}": [h.get_text(strip=True) for h in soup.find_all(f"h{i}")] for i in range(1, 7)}
+
+ # Extract Links
+ internal_links, external_links = set(), set()
+ for link in soup.find_all("a", href=True):
+ href = link["href"].strip()
+ full_url = urljoin(url, href)
+ (internal_links if urlparse(full_url).netloc == urlparse(url).netloc else external_links).add(full_url)
+
+ return {
+ "URL": url,
+ "Canonical URL": canonical_url,
+ "Title": title,
+ "Meta Description": meta_desc,
+ "Meta Keywords": meta_keywords,
+ "OG Title": og_title,
+ "OG Description": og_desc,
+ "Twitter Title": twitter_title,
+ "Twitter Description": twitter_desc,
+ **headings,
+ "Internal Links": list(internal_links),
+ "External Links": list(external_links),
+ }
+
+async def fetch_seo_data(urls):
+ """Fetches SEO data for multiple URLs asynchronously."""
+ async with aiohttp.ClientSession() as session:
+ tasks = [fetch_url(session, url) for url in urls]
+ responses = await asyncio.gather(*tasks)
+
+ return [parse_html(url, html) for url, html in zip(urls, responses) if html]
+
+def save_to_csv(data, filename="seo_data.csv"):
+ """Saves the extracted data to a CSV file."""
+ if not data:
+ print("No data to save.")
+ return
+
+ df = pd.DataFrame(data)
+ df.to_csv(filename, index=False, encoding="utf-8")
+ print(f"Data saved to {filename}")
+
+if __name__ == "__main__":
+ urls = ["https://example.com", "https://anotherwebsite.com"] # Add URLs here
+ seo_results = asyncio.run(fetch_seo_data(urls))
+ save_to_csv(seo_results)
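
Because the filename chadscrapper-seo.py contains a hyphen, it cannot be pulled in with a plain import statement; reusing fetch_seo_data() and save_to_csv() from another script therefore needs importlib (or a rename). A minimal sketch with a hypothetical driver file and URL list:

# run_seo.py -- hypothetical driver for chadscrapper-seo.py, not part of this commit.
import asyncio
import importlib.util

# Load the hyphenated module under an importable name; the __main__ guard in
# chadscrapper-seo.py keeps its example URLs from running during the load.
spec = importlib.util.spec_from_file_location("chadscrapper_seo", "chadscrapper-seo.py")
seo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(seo)

urls = ["https://example.com"]  # replace with the pages to audit
results = asyncio.run(seo.fetch_seo_data(urls))
seo.save_to_csv(results, filename="seo_example.csv")
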