fix: delete migrate sync data script (#27061)

zyssyz123 2025-10-20 13:54:24 +08:00 committed by GitHub
parent f811471b18
commit 1153dcef69
1 changed file with 0 additions and 49 deletions


@@ -22,55 +22,6 @@ def upgrade():
         batch_op.add_column(sa.Column('app_mode', sa.String(length=255), nullable=True))
         batch_op.create_index('message_app_mode_idx', ['app_mode'], unique=False)
-    conn = op.get_bind()
-
-    # Strategy: Update in batches to minimize lock time
-    # For large tables (millions of rows), this prevents long-running transactions
-    batch_size = 10000
-
-    print("Starting backfill of app_mode from conversations...")
-
-    # Use a more efficient UPDATE with JOIN
-    # This query updates messages.app_mode from conversations.mode
-    # Using string formatting for LIMIT since it's a constant
-    update_query = f"""
-        UPDATE messages m
-        SET app_mode = c.mode
-        FROM conversations c
-        WHERE m.conversation_id = c.id
-        AND m.app_mode IS NULL
-        AND m.id IN (
-            SELECT id FROM messages
-            WHERE app_mode IS NULL
-            LIMIT {batch_size}
-        )
-    """
-
-    # Execute batched updates
-    total_updated = 0
-    iteration = 0
-
-    while True:
-        iteration += 1
-        result = conn.execute(sa.text(update_query))
-
-        # Check if result is None or has no rowcount
-        if result is None:
-            print("Warning: Query returned None, stopping backfill")
-            break
-
-        rows_updated = result.rowcount if hasattr(result, 'rowcount') else 0
-        total_updated += rows_updated
-
-        if rows_updated == 0:
-            break
-
-        print(f"Iteration {iteration}: Updated {rows_updated} messages (total: {total_updated})")
-
-        # For very large tables, add a small delay to reduce load
-        # Uncomment if needed: import time; time.sleep(0.1)
-
-    print(f"Backfill completed. Total messages updated: {total_updated}")
     # ### end Alembic commands ###
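For reference, the batched-backfill pattern the deleted migration implemented can also be run as a standalone script outside Alembic. The sketch below is illustrative only and not part of this commit: DATABASE_URL is a placeholder you would replace, the 0.1 s pause mirrors the commented-out suggestion in the removed code, and LIMIT is passed as a bound parameter instead of the original f-string interpolation. The table and column names (messages.app_mode, conversations.mode, conversation_id) come from the deleted script itself.

import time

import sqlalchemy as sa

# Placeholder connection string -- adjust for your environment.
DATABASE_URL = "postgresql://user:pass@localhost/dify"
BATCH_SIZE = 10000

# Same query shape as the deleted migration, but with LIMIT as a bound
# parameter instead of f-string interpolation.
UPDATE_SQL = sa.text("""
    UPDATE messages m
    SET app_mode = c.mode
    FROM conversations c
    WHERE m.conversation_id = c.id
    AND m.app_mode IS NULL
    AND m.id IN (
        SELECT id FROM messages
        WHERE app_mode IS NULL
        LIMIT :batch_size
    )
""")

def backfill_app_mode() -> int:
    engine = sa.create_engine(DATABASE_URL)
    total_updated = 0
    while True:
        # engine.begin() commits each batch in its own short transaction,
        # which is the point of batching: row locks are held only briefly.
        with engine.begin() as conn:
            rows_updated = conn.execute(UPDATE_SQL, {"batch_size": BATCH_SIZE}).rowcount
        if rows_updated == 0:
            break
        total_updated += rows_updated
        print(f"Updated {rows_updated} messages (total: {total_updated})")
        time.sleep(0.1)  # small pause between batches to reduce load
    return total_updated

if __name__ == "__main__":
    print(f"Backfill completed. Total messages updated: {backfill_app_mode()}")

Committing each batch separately keeps transactions short, which is the same rationale the removed comments gave for batching in the first place.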