Convert NordPass-exported CSV to Apple Passwords-importable CSV
#!/usr/bin/env python3
"""
Convert NordPass-exported CSV to Apple Passwords-importable CSV.

Usage:
    uv run nordpass2apple.py nordpass.csv > apple.csv
    uv run nordpass2apple.py --type note nordpass.csv > apple.csv
"""
import argparse
import csv
import re
import sys
from typing import Dict, List, Optional

APPLE_HEADERS = ["Title", "URL", "Username", "Password", "Notes"]
# Column aliases for flexible input CSV format detection
COLUMN_ALIASES = {
    "type": {"type", "item type"},
    "title": {"name", "title", "item", "account", "label"},
    "url": {"url", "website", "web address", "domain", "hostname", "login url"},
    "username": {"username", "user name", "login", "email", "user"},
    "password": {"password", "pwd", "pass"},
    "notes": {"note", "notes", "remarks", "comment"},
    "totp": {"totp", "2fa", "otp", "one-time password", "authenticator secret"},
}
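
# Example (illustrative, not an exhaustive NordPass header): an export whose
# header row is "name,url,username,password,note,type" maps to
# title/url/username/password/notes/type via the aliases above.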

# Regex patterns
URL_SCHEME_RE = re.compile(r"^[a-z][a-z0-9+.-]*://", re.IGNORECASE)  # value already has a URL scheme
HOST_RE = re.compile(r"^[A-Za-z0-9.-]+\.[A-Za-z]{2,}(/.*)?$")  # bare hostname with optional path
URL_HOST_RE = re.compile(r"^[a-z]+://([^/]+)", re.IGNORECASE)  # capture the host part of a full URL


def normalize(s: str) -> str:
    """Normalize string for column matching."""
    return re.sub(r"[^a-z0-9]+", " ", s.strip().lower())


def detect_columns(header: List[str]) -> Dict[str, int]:
    """Map logical column names to actual column indices."""
    mapping = {}
    for idx, col in enumerate(header):
        normalized = normalize(col)
        for logical, aliases in COLUMN_ALIASES.items():
            if normalized in {normalize(a) for a in aliases}:
                mapping.setdefault(logical, idx)
                break
    return mapping


def normalize_url(url: str) -> str:
    """Add https:// scheme if missing from valid hostnames."""
    if url and not URL_SCHEME_RE.match(url) and HOST_RE.match(url):
        return f"https://{url}"
    return url
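
# Example behavior of normalize_url (illustrative):
#   normalize_url("mail.example.com")         -> "https://mail.example.com"
#   normalize_url("https://mail.example.com") -> unchanged (already has a scheme)
#   normalize_url("not a host")               -> unchanged (fails HOST_RE)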


def get_field(row: List[str], colmap: Dict[str, int], field: str, default: str = "") -> str:
    """Safely extract and strip field value from row."""
    idx = colmap.get(field)
    if idx is not None and idx < len(row):
        return row[idx].strip()
    return default


def process_row(
    row: List[str],
    header: List[str],
    colmap: Dict[str, int],
    args: argparse.Namespace,
) -> Optional[List[str]]:
    """Process a single CSV row and return Apple Passwords format or None to skip."""
    # Check item type filter
    item_type = get_field(row, colmap, "type").lower()
    if args.type and item_type and item_type != args.type.lower():
        return None

    # Extract core fields
    title = get_field(row, colmap, "title")
    url = get_field(row, colmap, "url")
    username = get_field(row, colmap, "username")
    password = get_field(row, colmap, "password")
    notes = get_field(row, colmap, "notes")

    # Derive title from URL if needed
    if not title and args.title_from == "url" and url:
        match = URL_HOST_RE.search(url)
        title = match.group(1) if match else url

    # Skip empty entries if requested
    if args.skip_empty and not title and not password:
        return None

    # Build extra notes
    extra_lines = []
    if args.include_totp:
        totp = get_field(row, colmap, "totp")
        if totp:
            extra_lines.append(f"TOTP: {totp}")
    if args.append_source_fields:
        known_indices = set(colmap.values())
        for i, val in enumerate(row):
            if i not in known_indices and val.strip():
                key = header[i] if i < len(header) else f"col_{i}"
                extra_lines.append(f"{key}: {val.strip()}")
    if extra_lines:
        notes = "\n".join(filter(None, [notes] + extra_lines))

    # Normalize URL
    url = normalize_url(url)

    return [title, url, username, password, notes]


def main():
    parser = argparse.ArgumentParser(
        description="Convert NordPass CSV to Apple Passwords CSV"
    )
    parser.add_argument("input_csv", help="NordPass-exported CSV file")
    parser.add_argument(
        "--type",
        default="password",
        help="Filter by item type (password, note, credit_card, folder). Default: password",
    )
    parser.add_argument(
        "--include-totp",
        action="store_true",
        help="Include TOTP/OTP secret in Notes",
    )
    parser.add_argument(
        "--skip-empty",
        action="store_true",
        help="Skip rows with empty Title and Password",
    )
    parser.add_argument(
        "--title-from",
        choices=["title", "url"],
        default="title",
        help="Fallback source for empty titles; 'url' derives the title from the URL host. Default: title",
    )
    parser.add_argument(
        "--append-source-fields",
        action="store_true",
        help="Append unmapped source fields to Notes",
    )
    args = parser.parse_args()

    # Read input CSV (with BOM support)
    with open(args.input_csv, encoding="utf-8-sig", newline="") as f_in:
        reader = csv.reader(f_in)
        rows = list(reader)

    if not rows:
        sys.exit("Error: Input CSV is empty")

    header, body = rows[0], rows[1:]
    colmap = detect_columns(header)

    # Write output to stdout
    writer = csv.writer(sys.stdout)
    writer.writerow(APPLE_HEADERS)

    count_out = 0
    for row in body:
        result = process_row(row, header, colmap, args)
        if result:
            writer.writerow(result)
            count_out += 1

    # Report to stderr
    print(
        f"Processed {count_out}/{len(body)} records (type={args.type})",
        file=sys.stderr,
    )
    print(
        "Reminder: delete the original NordPass CSV after verifying import.",
        file=sys.stderr,
    )


if __name__ == "__main__":
    main()
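
Example run (illustrative; the exact NordPass column set varies by app version, and the converter matches columns by the aliases above rather than by position):

    uv run nordpass2apple.py nordpass.csv > apple.csv

Given an input row like

    name,url,username,password,note,type
    Example Mail,mail.example.com,alice@example.com,s3cret,personal inbox,password

the script writes

    Title,URL,Username,Password,Notes
    Example Mail,https://mail.example.com,alice@example.com,s3cret,personal inbox

to stdout, and reports "Processed 1/1 records (type=password)" plus the cleanup reminder on stderr. Once the import into Apple Passwords is verified, delete the plaintext CSV files.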