#!/usr/bin/env python

import sqlite3, os, sys

# Only tested against the chat.db schema found on Darwin 14.0.0 (OS X 10.10);
# refuse to run on anything else.
if os.popen("uname -sr").read() != "Darwin 14.0.0\n":
    print("This is not for your OS X version")
    sys.exit(1)

def verify_format(cursor, name):
    # Check that the database contains exactly the tables this script expects.
    tables = sorted(row[0] for row in cursor.execute(
        "SELECT name FROM sqlite_master WHERE type='table'").fetchall())
    expected = sorted(["_SqliteDatabaseProperties", "chat", "sqlite_sequence",
                       "attachment", "handle", "chat_handle_join", "message",
                       "chat_message_join", "message_attachment_join"])
    if tables != expected:
        print("Unexpected database format (%s). Didn't iMessage convert it?" % name)
        sys.exit(1)

def fetch(cursor, table):
    # Return every row of a table, ordered by ROWID.
    cursor.execute("SELECT * FROM " + table + " ORDER BY ROWID")
    return cursor.fetchall()


def max_row_id(cursor, table):
    # Highest ROWID currently used in a table.
    cursor.execute("SELECT Max(ROWID) FROM " + table)
    return cursor.fetchone()[0]


def set_tuple_index(tup, index, value):
    # Return a copy of the tuple with the element at "index" replaced by "value".
    return tuple(tup[0:index]) + (value,) + tuple(tup[index + 1:])

def tuple_where(tuples, id_func, o):
    # ROWID of the first row in "tuples" whose identity (as computed by id_func)
    # matches that of row "o", or None if there is no such row.
    matches = [x for x in tuples if id_func(x) == id_func(o)]
    return matches[0][0] if len(matches) > 0 else None

def merge(old_cursor, new_cursor, table, id_func):
    # Merge the rows of "table" from the new database into the old one.
    # Returns (row_map, additional): row_map maps new ROWIDs to old ROWIDs,
    # additional is the list of rows that still need to be inserted.
    old = fetch(old_cursor, table)
    new = fetch(new_cursor, table)
    additional = []
    row_map = {}
    next_row = max_row_id(old_cursor, table) + 1
    for row in new:
        rowid = row[0]
        mapped_row = tuple_where(old, id_func, row)
        if mapped_row is None:
            # Row only exists in the new database: give it a fresh ROWID.
            mapped_row = next_row
            next_row += 1
            additional.append(set_tuple_index(row, 0, mapped_row))
            print("Adding %s row %d" % (table, mapped_row))
        else:
            print("Mapping %s row %d->%d" % (table, rowid, mapped_row))
        row_map[rowid] = mapped_row
    return (row_map, additional)

def apply_map_to_tuple_index(tuples, index, m):
    # Rewrite one column of every row through the ROWID map "m".
    return [set_tuple_index(t, index, m[t[index]]) for t in tuples]

def merge_joins(old, new, table, map1, map2):
    # Translate the new database's join rows through the two ROWID maps and
    # keep only those the old database does not already contain.
    print(table + ":")
    new_rows = fetch(new, table)
    old_rows = fetch(old, table)
    mapped = apply_map_to_join(new_rows, map1, map2)
    return [x for x in mapped if x not in old_rows]

def apply_map_to_join(tuples, map1, map2):
    for t in tuples:
        print("Mapping %d/%d->%d/%d" % (t[0], t[1], map1[t[0]], map2[t[1]]))
    return [(map1[t[0]], map2[t[1]]) for t in tuples]

def insert_values(cursor, values, table):
    # Insert each row into "table" with one "?" placeholder per column.
    for row in values:
        placeholder = ", ".join(["?"] * len(row))
        statement = "INSERT INTO %s VALUES (%s)" % (table, placeholder)
        cursor.execute(statement, row)

with sqlite3.connect("chat.db") as old_db, sqlite3.connect("new.db") as new_db:
    old = old_db.cursor()
    new = new_db.cursor()

    verify_format(old, "chat.db")
    verify_format(new, "new.db")

    # Merge the four main tables. Each lambda picks the column(s) that identify
    # a row independently of its ROWID (the guid for attachment/chat/message,
    # the (id, service) pair for handle).
    (attachment_map, attachments) = merge(old, new, "attachment", lambda x: x[1])
    (chat_map, chats) = merge(old, new, "chat", lambda x: x[1])
    (handle_map, handles) = merge(old, new, "handle", lambda x: (x[1], x[3]))
    (message_map, messages) = merge(old, new, "message", lambda x: x[1])
    # Remap the handle_id column (index 5) of the new messages to the merged handles.
    messages = apply_map_to_tuple_index(messages, 5, handle_map)

    chat_handle_joins = merge_joins(old, new, "chat_handle_join", chat_map, handle_map)
    chat_message_joins = merge_joins(old, new, "chat_message_join", chat_map, message_map)
    message_attachment_joins = merge_joins(old, new, "message_attachment_join", message_map, attachment_map)

    insert_values(old, attachments, "attachment")
    insert_values(old, chats, "chat")
    insert_values(old, handles, "handle")
    insert_values(old, messages, "message")

    insert_values(old, chat_handle_joins, "chat_handle_join")
    insert_values(old, chat_message_joins, "chat_message_join")
    insert_values(old, message_attachment_joins, "message_attachment_join")

    old_db.commit()
    print("OK")

If someone has the same problem as me: for messages whose handle_id is set to 0, find the handle_id of your own account (it can be 1, but not always) and use that value as the handle_id of those messages. I'm sorry, my expertise on the topic doesn't go any further.
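
In case it helps, here is a minimal sketch of that fix, assuming the merged database is still chat.db and that MY_HANDLE_ID is a placeholder for your own account's handle_id, which you have to look up yourself first:

import sqlite3

MY_HANDLE_ID = 1  # placeholder: replace with the handle_id of your own account

with sqlite3.connect("chat.db") as db:
    cur = db.cursor()
    # Reassign orphaned messages (handle_id = 0) to your own account's handle.
    cur.execute("UPDATE message SET handle_id = ? WHERE handle_id = 0", (MY_HANDLE_ID,))
    print("Updated %d rows" % cur.rowcount)
    db.commit()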