#!/usr/bin/env python3
"""
QuizVDS SPL Exam Questions Scraper

Scrapes all SPL exam questions from quizvds.it and saves each subject as a
markdown file, plus a SUMMARY.md with per-subject question counts.
"""

import json
import os
import re
import time
from pathlib import Path

import requests
from bs4 import BeautifulSoup

BASE_URL = "https://quizvds.it"

# Browser-like headers so the site serves the normal HTML pages.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Connection": "keep-alive",
}

OUTPUT_DIR = Path("/Users/i052341/Daten/Cloud/04 - Ablage/Ablage 2020 - 2029/Ablage 2025/Hobbies 2025/Segelflug/Theorie/Glidr/QuizVDS")

# (subject_code, subject_name, URL slug) for every SPL exam subject.
SUBJECTS = [
    ("10", "Air Law", "air-law"),
    ("20", "Aircraft General Knowledge", "aircraft-general-knowledge"),
    ("30", "Flight Performance and Planning", "flight-performance-and-planning"),
    ("40", "Human Performance and Limitations", "human-performance-and-limitations"),
    ("50", "Meteorology", "meteorology"),
    ("60", "Navigation", "navigation"),
    ("70", "Operational Procedures", "operational-procedures"),
    ("80", "Principles of Flight", "principle-of-flight-aeroplane"),
    ("90", "Communication", "communication"),
]

LANGUAGES = [
    ("en-en", "English"),
    # ("it-it", "Italian"),  # Uncomment if Italian version needed
]


def get_page(url, retries=3):
    """Fetch *url* and return its HTML text, or None after *retries* failed attempts.

    Sleeps 2 s between attempts; logs every retry and the final failure.
    """
    for attempt in range(retries):
        try:
            resp = requests.get(url, headers=HEADERS, timeout=15)
            resp.raise_for_status()
            return resp.text
        except Exception as e:  # network/HTTP errors vary widely; log and retry
            if attempt < retries - 1:
                print(f" Retry {attempt + 1} for {url}: {e}")
                time.sleep(2)
            else:
                print(f" FAILED after {retries} attempts: {url}: {e}")
    return None


def get_question_urls_from_listing(lang_code, subject_slug):
    """Collect all question URLs from the paginated listing for one subject.

    Walks pages 1..49 (page 1 has no /pageN suffix), stops at the first page
    that fails to load, has no table, or yields no new question links.
    Returns an order-preserving, de-duplicated list of hrefs.
    """
    question_urls = []
    seen = set()  # pagination can repeat entries; keep first occurrence only
    for page_num in range(1, 50):  # hard cap of 50 pages per subject
        if page_num == 1:
            url = f"{BASE_URL}/{lang_code}/quiz/spl-en/{subject_slug}"
        else:
            url = f"{BASE_URL}/{lang_code}/quiz/spl-en/{subject_slug}/page{page_num}"

        html = get_page(url)
        if not html:
            break
        soup = BeautifulSoup(html, "html.parser")

        # Question links live in the listing table; exclude pagination links.
        table = soup.find("table")
        if not table:
            break
        links = table.find_all("a", href=True)
        page_links = [
            a["href"]
            for a in links
            if f"/quiz/spl-en/{subject_slug}/" in a["href"] and "/page" not in a["href"]
        ]
        if not page_links:
            break

        for href in page_links:
            if href not in seen:
                seen.add(href)
                question_urls.append(href)
        print(f" Page {page_num}: found {len(page_links)} questions (total: {len(question_urls)})")
        time.sleep(0.3)

    return question_urls


def parse_question_page(html, url):
    """Parse one question page into a dict, or return None if unparseable.

    The question text and correct answer come from the page's JSON-LD QAPage
    structured data (most reliable); the ordered answer list comes from the
    HTML table (rows flagged with the "success" class are correct). If the
    table is missing, answers are reconstructed from JSON-LD (correct first).

    Returns {"question", "answers": [(text, is_correct), ...],
             "correct_answer", "url"} or None.
    """
    if not html:
        return None
    soup = BeautifulSoup(html, "html.parser")

    question_text = None
    correct_answer = None
    all_wrong = []  # JSON-LD "suggestedAnswer" texts; used only as fallback

    for script in soup.find_all("script", type="application/ld+json"):
        try:
            data = json.loads(script.string)
        except (json.JSONDecodeError, TypeError, AttributeError):
            continue
        try:
            if data.get("@type") == "QAPage":
                main = data.get("mainEntity", {})
                question_text = main.get("name", "")
                accepted = main.get("acceptedAnswer", {})
                correct_answer = accepted.get("text", "")
                suggested = main.get("suggestedAnswer", [])
                all_wrong = [s.get("text", "") for s in suggested]
        except AttributeError:
            # JSON-LD payload wasn't the expected dict shape; skip it.
            continue

    if not question_text:
        return None

    # Get answers from the HTML table (preserves display order A/B/C/D).
    table = soup.find("table")
    answers_from_table = []
    if table:
        for row in table.find_all("tr"):
            cell = row.find("td")
            if cell:
                text = cell.get_text(strip=True)
                is_correct = row.get("class") and "success" in row.get("class", [])
                answers_from_table.append((text, is_correct))

    if answers_from_table:
        final_answers = answers_from_table
    else:
        # Fallback: rebuild the list from JSON-LD, correct answer first.
        final_answers = [(correct_answer, True)] + [(a, False) for a in all_wrong]

    return {
        "question": question_text,
        "answers": final_answers,
        "correct_answer": correct_answer,
        "url": url,
    }


def format_question_markdown(q_num, data):
    """Render one parsed question dict as a markdown section.

    Emits a ### heading with a ^qN block anchor, lettered answer options,
    the correct letter (or "?" if it can't be determined), and an empty
    Explanation stub for manual notes.
    """
    question = data["question"]
    answers = data["answers"]
    correct_answer = data["correct_answer"]

    lines = [
        f"### Q{q_num}: {question} ^q{q_num}",
    ]

    correct_letter = None
    for i, (ans_text, is_correct) in enumerate(answers):
        letter = chr(ord("A") + i)  # A, B, C, ... — no fixed upper bound
        lines.append(f"- {letter}) {ans_text}")
        if is_correct or ans_text == correct_answer:
            correct_letter = letter

    if correct_letter:
        lines.append(f"**Correct: {correct_letter})**")
    else:
        lines.append(f"**Correct: ?)** ")

    lines.append("")
    lines.append("> **Explanation:** ")
    lines.append("")
    return "\n".join(lines)


def scrape_subject(lang_code, subject_code, subject_name, subject_slug):
    """Scrape every question for one subject; return a list of question dicts.

    Returns an empty list when no question URLs are found or none parse.
    """
    print(f"\n Scraping {subject_name} ({subject_slug})...")

    question_urls = get_question_urls_from_listing(lang_code, subject_slug)
    print(f" Found {len(question_urls)} question URLs")
    if not question_urls:
        print(f" WARNING: No questions found for {subject_name}")
        return []

    questions = []
    for i, url in enumerate(question_urls, 1):
        # Listing hrefs may be site-relative; make them absolute.
        full_url = BASE_URL + url if url.startswith("/") else url

        html = get_page(full_url)
        data = parse_question_page(html, full_url)
        if data:
            questions.append(data)
            if i % 10 == 0:
                print(f" Scraped {i}/{len(question_urls)} questions...")
        else:
            print(f" WARNING: Failed to parse question {i}: {full_url}")
        time.sleep(0.2)  # Be polite to the server

    return questions


def save_subject_markdown(subject_code, subject_name, questions, lang_suffix=""):
    """Write one subject's questions to '<code> - <name><suffix>.md'; return the path."""
    filename = f"{subject_code} - {subject_name}{lang_suffix}.md"
    filepath = OUTPUT_DIR / filename

    lines = [
        f"# {subject_code} - {subject_name}",
        "",
        f"Total questions: {len(questions)}",
        "",
        "---",
        "",
    ]
    for i, data in enumerate(questions, 1):
        lines.append(format_question_markdown(i, data))
        lines.append("---")
        lines.append("")

    content = "\n".join(lines)
    with open(filepath, "w", encoding="utf-8") as f:
        f.write(content)
    print(f" Saved {len(questions)} questions to: {filepath}")
    return filepath


def main():
    """Scrape every subject in every configured language and write a summary."""
    print("QuizVDS SPL Exam Questions Scraper")
    print("=" * 50)

    summary = {}
    for lang_code, lang_name in LANGUAGES:
        print(f"\nLanguage: {lang_name} ({lang_code})")
        print("-" * 40)
        lang_suffix = "" if lang_code == "en-en" else f" ({lang_name})"

        for subject_code, subject_name, subject_slug in SUBJECTS:
            questions = scrape_subject(lang_code, subject_code, subject_name, subject_slug)
            if questions:
                save_subject_markdown(subject_code, subject_name, questions, lang_suffix)
                summary[f"{subject_code} - {subject_name}"] = len(questions)
            else:
                summary[f"{subject_code} - {subject_name}"] = 0

    # Console summary
    print("\n" + "=" * 50)
    print("SUMMARY")
    print("=" * 50)
    total = 0
    for subject, count in summary.items():
        print(f" {subject}: {count} questions")
        total += count
    print(f"\n TOTAL: {total} questions")

    # SUMMARY.md with the actual scrape date, not a hard-coded one.
    summary_path = OUTPUT_DIR / "SUMMARY.md"
    with open(summary_path, "w", encoding="utf-8") as f:
        f.write("# QuizVDS SPL Scraping Summary\n\n")
        f.write(f"Scraped on: {time.strftime('%Y-%m-%d')}\n\n")
        f.write("| Subject | Questions |\n")
        f.write("|---------|----------|\n")
        for subject, count in summary.items():
            f.write(f"| {subject} | {count} |\n")
        f.write(f"\n**Total: {total} questions**\n")
    print(f"\nSummary saved to: {summary_path}")


if __name__ == "__main__":
    main()