#!/usr/bin/env python3
"""
Process a word list from a tab-separated input file with strict character filtering.

Only allows: a-z, A-Z, German umlauts (öäüÖÄÜß), and hyphenation characters.
"""

import sys
import unicodedata
import re


def is_quote_character(char):
    """Check if a character is a quote using Unicode categories."""
    # Unicode quote categories:
    # - Ps: Punctuation, open (includes opening quotes)
    # - Pe: Punctuation, close (includes closing quotes)
    # - Pi: Punctuation, initial quote
    # - Pf: Punctuation, final quote
    category = unicodedata.category(char)
    return category in ('Pi', 'Pf', 'Ps', 'Pe') or char in (
        '"', "'", '`', '´',
        '\u2018', '\u2019', '\u201c', '\u201d',  # curly single/double quotes
        '\u201a', '\u201e', '\u2039', '\u203a',  # ‚ „ ‹ ›
    )


def has_broken_encoding(word):
    """
    Check if word contains broken Unicode characters.

    Returns True if the word contains:
    - U+FFFD (�), the Unicode replacement character
    - Other common signs of encoding issues
    """
    if not word:
        return False

    # Characters that typically indicate mojibake or truncated input
    suspicious_patterns = [
        '\ufffd',  # Replacement character (�)
        '\x00',    # Null character
    ]
    return any(pattern in word for pattern in suspicious_patterns)


def clean_word(word):
    """
    Clean word by:
    1. Removing quotes (using Unicode quote categories) from beginning and end
    2. Removing leading hyphens (but not internal ones)
    """
    if not word:
        return word

    # Remove quotes from beginning
    while word and is_quote_character(word[0]):
        word = word[1:]

    # Remove quotes from end
    while word and is_quote_character(word[-1]):
        word = word[:-1]

    # Remove leading hyphens
    word = word.lstrip('-')

    return word


def is_valid_word(word):
    """
    Check if word contains only allowed characters:
    - a-z, A-Z
    - German umlauts: öäüÖÄÜß
    - Hyphenation characters: - (hyphen-minus), ‐ (hyphen), – (en dash),
      — (em dash), U+00AD (soft hyphen)
    """
    if not word:
        return False

    # Define allowed characters (soft hyphen written as \u00ad for visibility)
    allowed_pattern = re.compile(r'^[a-zA-ZöäüÖÄÜß\-‐–—\u00ad]+$')
    return bool(allowed_pattern.match(word))


def process_wordlist(input_file, output_file=None):
    """
    Process the word list file with strict character filtering.
    Args:
        input_file: Path to input file or file object
        output_file: Path to output file (optional, defaults to stdout)
    """
    # Determine whether input_file is a path or a file object
    if isinstance(input_file, str):
        with open(input_file, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    else:
        lines = input_file.readlines()

    # Use a set to track unique words and a list to preserve order
    seen_words = set()
    results = []

    for line in lines:
        line = line.strip()
        if not line:
            continue

        parts = line.split('\t')
        if len(parts) < 2:
            continue

        try:
            index = int(parts[0])
            word = parts[1]

            # Filter: only process entries with index >= 100
            if index < 100:
                continue

            # Skip words with broken encoding
            if has_broken_encoding(word):
                continue

            # Clean the word
            cleaned_word = clean_word(word)

            # Skip if cleaning resulted in broken encoding
            if has_broken_encoding(cleaned_word):
                continue

            # Skip words that contain spaces
            if ' ' in cleaned_word:
                continue

            # Only allow words with valid characters
            if not is_valid_word(cleaned_word):
                continue

            # Only add non-empty words that we haven't seen before
            if cleaned_word and cleaned_word not in seen_words:
                seen_words.add(cleaned_word)
                results.append(cleaned_word)

        except (ValueError, IndexError):
            # Skip malformed lines
            continue

    # Output results
    if output_file:
        with open(output_file, 'w', encoding='utf-8') as f:
            for word in results:
                f.write(word + '\n')
    else:
        for word in results:
            print(word)

    return results


if __name__ == '__main__':
    if len(sys.argv) < 2:
        print("Usage: python process_wordlist_narrow.py <input_file> [output_file]")
        print("  If output_file is not specified, results are printed to stdout")
        print("\nThis script only allows words containing:")
        print("  - Letters: a-z, A-Z")
        print("  - German umlauts: öäüÖÄÜß")
        print("  - Hyphenation characters: - ‐ – — and the soft hyphen (U+00AD)")
        sys.exit(1)

    input_file = sys.argv[1]
    output_file = sys.argv[2] if len(sys.argv) > 2 else None

    process_wordlist(input_file, output_file)
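
# Illustrative usage sketch (comments only; the file name and sample data below
# are made up for demonstration). Given a hypothetical tab-separated words.tsv:
#
#     99<TAB>Straße
#     142<TAB>Donau-Dampfschiff
#     215<TAB>"Fähre"
#
# running
#
#     python3 process_wordlist_narrow.py words.tsv filtered.txt
#
# would skip "Straße" (index below 100), strip the surrounding quotes from
# "Fähre", and write the deduplicated, validated words
#
#     Donau-Dampfschiff
#     Fähre
#
# to filtered.txt. The same filtering is available programmatically:
#
#     from process_wordlist_narrow import process_wordlist
#     words = process_wordlist("words.tsv")  # prints to stdout, returns the list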