feat: Enhance update check and download processes with safety timeouts and logging

claudi 2026-01-30 13:09:50 +01:00
parent f4eb511a1c
commit 4ab44c83ba
2 changed files with 156 additions and 51 deletions

View file

@@ -8,6 +8,7 @@ import asyncio
import hashlib
import json
import logging
import socket
from dataclasses import dataclass
from datetime import datetime, timedelta
from pathlib import Path
@@ -144,9 +145,13 @@ class UpdateManager:
Returns:
Release object if newer version available, None otherwise
"""
logger.debug(f"check_for_updates() called, current version: {self.current_version}")
# Try cache first
logger.debug("Checking cache...")
cached = self._load_cache()
if cached:
logger.debug("Found cached release")
release_data = cached.get("release")
if release_data:
version = release_data["tag_name"].lstrip("v")
@@ -156,6 +161,7 @@
return Release(**release_data)
# Fetch from API
logger.debug("Fetching from API...")
try:
logger.info(f"Checking for updates from {self.api_endpoint}")
@@ -198,19 +204,40 @@
"""
try:
logger.debug(f"Fetching release from {self.api_endpoint}")
# Use aggressive timeout: 5 seconds for connection, 5 seconds for read
with urlopen(self.api_endpoint, timeout=5) as response:
data = json.loads(response.read())
return {
"tag_name": data["tag_name"],
"name": data["name"],
"version": data["tag_name"].lstrip("v"),
"body": data["body"],
"assets": data.get("assets", []),
"published_at": data.get("published_at", ""),
}
# Set socket timeout to prevent hanging
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(5)
try:
logger.debug("Opening URL connection...")
with urlopen(self.api_endpoint, timeout=5) as response:
logger.debug(f"Response status: {response.status}, reading data...")
response_data = response.read()
logger.debug(f"Read {len(response_data)} bytes, parsing JSON...")
data = json.loads(response_data)
logger.info(f"Successfully fetched release: {data.get('tag_name', 'unknown')}")
return {
"tag_name": data["tag_name"],
"name": data["name"],
"version": data["tag_name"].lstrip("v"),
"body": data["body"],
"assets": data.get("assets", []),
"published_at": data.get("published_at", ""),
}
finally:
socket.setdefaulttimeout(old_timeout)
except socket.timeout as e:
logger.error(f"Socket timeout (5s) connecting to {self.api_endpoint}")
return None
except TimeoutError as e:
logger.error(f"Timeout error: {e}")
return None
except Exception as e:
logger.error(f"Failed to fetch release: {type(e).__name__}: {e}")
import traceback
logger.debug(traceback.format_exc())
return None
async def download_update(
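As an aside, the set-and-restore dance around socket.setdefaulttimeout() in _fetch_latest_release() can be factored into a small context manager. The sketch below is illustrative and not part of the commit; the name timeout_guard and the 5-second default are hypothetical:

import socket
from contextlib import contextmanager

@contextmanager
def timeout_guard(seconds: float = 5.0):
    """Temporarily set the process-wide socket default timeout, restoring the previous value on exit."""
    previous = socket.getdefaulttimeout()
    socket.setdefaulttimeout(seconds)
    try:
        yield
    finally:
        socket.setdefaulttimeout(previous)

# Any socket created inside the block inherits the 5 s default, while an explicit
# timeout= on urlopen() still bounds the connect and read calls, e.g.:
# with timeout_guard(5.0):
#     with urlopen(api_endpoint, timeout=5) as response:
#         payload = response.read()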

View file

@@ -2,6 +2,7 @@
import asyncio
import logging
from datetime import datetime
from pathlib import Path
from typing import Optional
@@ -241,6 +242,7 @@ class MainWindow(QMainWindow):
super().__init__(parent)
self.config = config
self._background_threads = [] # Keep references to background threads
self._background_workers = {} # Keep references to background workers
self.checking_dialog = None # Track the checking dialog
self._is_manual_check = False # Track if this is a manual check (for UI feedback)
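For context on the two containers added to __init__ here: a QThread or worker held only in a local variable can be garbage-collected as soon as the creating method returns, which silently kills the background work. A minimal sketch of the bookkeeping, with hypothetical names and assuming PySide6 (the project may use PyQt instead):

from PySide6.QtCore import QObject, QThread

class ThreadRegistry:
    """Holds strong references so live threads and workers cannot be garbage-collected."""

    def __init__(self) -> None:
        self._threads: list[QThread] = []
        self._workers: dict[int, QObject] = {}

    def register(self, thread: QThread, worker: QObject) -> None:
        self._threads.append(thread)
        self._workers[id(thread)] = worker

    def unregister(self, thread: QThread) -> None:
        # called from a thread.finished handler once Qt is done with both objects
        if thread in self._threads:
            self._threads.remove(thread)
        self._workers.pop(id(thread), None)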
@@ -599,35 +601,31 @@
manager: UpdateManager instance
"""
try:
logger.debug("_run_async_check() starting")
# Create and start background thread
thread = QThread()
worker = UpdateCheckWorker(manager, self.config.app_version)
# Connect signals
worker.update_available.connect(self._on_update_available)
worker.update_status.connect(self._on_update_status)
worker.check_failed.connect(self._on_check_failed)
worker.finished.connect(thread.quit)
worker.finished.connect(worker.deleteLater)
thread.finished.connect(thread.deleteLater)
# Keep reference to thread to prevent garbage collection
# IMPORTANT: Keep references to prevent garbage collection
# Store in a list to keep worker alive during thread execution
self._background_threads.append(thread)
self._background_workers = getattr(self, '_background_workers', {})
self._background_workers[id(thread)] = worker
# Clean up finished threads from list
def cleanup_thread():
if thread in self._background_threads:
self._background_threads.remove(thread)
logger.debug(f"Created worker and thread, thread id: {id(thread)}")
thread.finished.connect(cleanup_thread)
# Create a safety timeout timer (but don't start it yet)
# Use a flag-based approach to avoid thread issues with stopping timers
check_started_time = [datetime.now()] # Track when check started
check_completed = [False] # Flag to mark when check completes
# Start thread
worker.moveToThread(thread)
thread.started.connect(worker.run)
thread.start()
# Set a safety timeout - if check doesn't finish in 30 seconds, force close dialog
def force_close_timeout():
# Check if already completed - if so, don't show error
if check_completed[0]:
logger.debug("Timeout fired but check already completed, suppressing error")
return
logger.warning("Update check taking too long (30s timeout)")
if hasattr(self, 'checking_dialog') and self.checking_dialog:
self.checking_dialog.close()
@@ -643,10 +641,51 @@
"Please check your connection and try again."
)
QTimer.singleShot(30000, force_close_timeout) # 30 seconds
safety_timer = QTimer()
safety_timer.setSingleShot(True)
safety_timer.setInterval(30000) # 30 seconds
safety_timer.timeout.connect(force_close_timeout)
# Mark check as completed when any result arrives
def on_check_done():
logger.debug("Check finished, marking as completed to prevent timeout error")
check_completed[0] = True
# Connect signals
worker.update_available.connect(self._on_update_available)
worker.update_available.connect(on_check_done)
worker.update_status.connect(self._on_update_status)
worker.update_status.connect(on_check_done) # "Ready" status means check done
worker.check_failed.connect(self._on_check_failed)
worker.check_failed.connect(on_check_done)
worker.finished.connect(thread.quit)
worker.finished.connect(worker.deleteLater)
thread.finished.connect(thread.deleteLater)
# Clean up finished threads and workers from list
def cleanup_thread():
logger.debug(f"Cleaning up thread {id(thread)}")
if thread in self._background_threads:
self._background_threads.remove(thread)
if id(thread) in self._background_workers:
del self._background_workers[id(thread)]
thread.finished.connect(cleanup_thread)
# Move worker to thread and start
logger.debug("Moving worker to thread and connecting started signal")
worker.moveToThread(thread)
thread.started.connect(worker.run)
logger.debug("Starting thread...")
thread.start()
logger.debug("Thread started, starting safety timer")
# Start the safety timeout
safety_timer.start()
except Exception as e:
logger.error(f"Failed to start update check thread: {e}")
logger.error(f"Failed to start update check thread: {e}", exc_info=True)
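The flag-plus-single-shot-timer idea above can be seen in isolation in the sketch below. It is a self-contained toy rather than the application code: the 1-second interval, print call, and app.quit() stand in for the 30-second timeout, dialog handling, and normal event-loop life cycle, and PySide6 is assumed:

import sys
from PySide6.QtCore import QCoreApplication, QTimer

app = QCoreApplication(sys.argv)

check_completed = [False]              # mutable flag flipped by any result handler

def on_check_done():
    check_completed[0] = True          # update found, "ready", or failure all disarm the timeout

def force_close_timeout():
    if check_completed[0]:             # a result already arrived; do not report a bogus timeout
        app.quit()
        return
    print("Update check timed out")    # the real code closes the checking dialog and shows an error
    app.quit()

safety_timer = QTimer()                # keep a reference so the timer is not garbage-collected
safety_timer.setSingleShot(True)
safety_timer.setInterval(1_000)        # the commit uses 30_000 ms
safety_timer.timeout.connect(force_close_timeout)
safety_timer.start()

QTimer.singleShot(200, on_check_done)  # simulate the worker finishing before the timeout

sys.exit(app.exec())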
def _on_update_status(self, status: str, emoji: str) -> None:
"""Handle update status changes.
@@ -767,6 +806,8 @@
from webdrop_bridge.core.updater import UpdateManager
try:
logger.debug("_perform_update_async() starting")
# Create update manager
manager = UpdateManager(
current_version=self.config.app_version,
@@ -777,6 +818,12 @@
thread = QThread()
worker = UpdateDownloadWorker(manager, release, self.config.app_version)
# IMPORTANT: Keep references to prevent garbage collection
self._background_threads.append(thread)
self._background_workers[id(thread)] = worker
logger.debug(f"Created download worker and thread, thread id: {id(thread)}")
# Connect signals
worker.download_complete.connect(self._on_download_complete)
worker.download_failed.connect(self._on_download_failed)
@@ -785,31 +832,56 @@
worker.finished.connect(worker.deleteLater)
thread.finished.connect(thread.deleteLater)
# Keep reference to thread to prevent garbage collection
self._background_threads.append(thread)
# Create a safety timeout timer for download (10 minutes)
# Use a flag-based approach to avoid thread issues with stopping timers
download_started_time = [datetime.now()] # Track when download started
download_completed = [False] # Flag to mark when download completes
# Clean up finished threads from list
def cleanup_thread():
if thread in self._background_threads:
self._background_threads.remove(thread)
thread.finished.connect(cleanup_thread)
# Start thread
worker.moveToThread(thread)
thread.started.connect(worker.run)
thread.start()
# Set a safety timeout - if download doesn't finish in 10 minutes (600 seconds),
# force stop to prevent infinite hang
def force_timeout():
# Check if already completed - if so, don't show error
if download_completed[0]:
logger.debug("Timeout fired but download already completed, suppressing error")
return
logger.error("Download taking too long (10 minute timeout)")
self.set_update_status("Download timed out - no server response", emoji="⏱️")
worker.download_failed.emit("Download took too long with no response")
thread.quit()
thread.wait()
QTimer.singleShot(600000, force_timeout) # 10 minutes
safety_timer = QTimer()
safety_timer.setSingleShot(True)
safety_timer.setInterval(600000) # 10 minutes
safety_timer.timeout.connect(force_timeout)
# Mark download as completed when it finishes
def on_download_done():
logger.debug("Download finished, marking as completed to prevent timeout error")
download_completed[0] = True
worker.download_complete.connect(on_download_done)
worker.download_failed.connect(on_download_done)
# Clean up finished threads from list
def cleanup_thread():
logger.debug(f"Cleaning up download thread {id(thread)}")
if thread in self._background_threads:
self._background_threads.remove(thread)
if id(thread) in self._background_workers:
del self._background_workers[id(thread)]
thread.finished.connect(cleanup_thread)
# Start thread
logger.debug("Moving download worker to thread and connecting started signal")
worker.moveToThread(thread)
thread.started.connect(worker.run)
logger.debug("Starting download thread...")
thread.start()
logger.debug("Download thread started, starting safety timer")
# Start the safety timeout
safety_timer.start()
except Exception as e:
logger.error(f"Failed to start update download: {e}")
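Both the check and download paths follow the same QThread/worker lifecycle. A minimal runnable sketch of that lifecycle, assuming PySide6 and an illustrative Worker class (the real workers emit richer signals such as download_complete and download_failed):

import sys
from PySide6.QtCore import QCoreApplication, QObject, QThread, Signal

class Worker(QObject):
    finished = Signal()

    def run(self):
        # long-running work executes here, off the main thread
        print("working in", QThread.currentThread())
        self.finished.emit()

app = QCoreApplication(sys.argv)

thread = QThread()
worker = Worker()
worker.moveToThread(thread)                   # worker slots now run in the new thread
thread.started.connect(worker.run)            # kick off the work when the thread starts
worker.finished.connect(thread.quit)          # stop the thread's event loop when the work is done
worker.finished.connect(worker.deleteLater)   # schedule deletion on the worker's own thread
thread.finished.connect(thread.deleteLater)
thread.finished.connect(app.quit)             # end the example once the thread has stopped
thread.start()

sys.exit(app.exec())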
@@ -896,10 +968,13 @@ class UpdateCheckWorker(QObject):
"""Run the update check."""
loop = None
try:
logger.debug("UpdateCheckWorker.run() starting")
# Notify checking status
self.update_status.emit("Checking for updates", "🔄")
# Create a fresh event loop for this thread
logger.debug("Creating new event loop for worker thread")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
@@ -912,12 +987,15 @@
timeout=10
)
)
logger.debug(f"Update check completed, release={release}")
# Emit result
if release:
logger.info(f"Update available: {release.version}")
self.update_available.emit(release)
else:
# No update available - show ready status
logger.info("No update available")
self.update_status.emit("Ready", "")
except asyncio.TimeoutError:
@@ -925,7 +1003,7 @@
self.check_failed.emit("Server not responding - check again later")
except Exception as e:
logger.error(f"Update check failed: {e}")
logger.error(f"Update check failed: {e}", exc_info=True)
self.check_failed.emit(f"Check failed: {str(e)[:50]}")
finally:
# Properly close the event loop
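Finally, the per-thread event-loop dance in UpdateCheckWorker.run() reduces to the pattern below. This is a plain-Python sketch with a stand-in coroutine; in the commit the coroutine is the manager's check call and the 10-second wait_for matches the diff:

import asyncio

async def check_for_updates():
    await asyncio.sleep(0.1)       # stand-in for the real network check
    return None                    # None means "no update available"

def run_check_in_worker_thread():
    # A QThread has no asyncio loop of its own, so create one just for this call.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            asyncio.wait_for(check_for_updates(), timeout=10)
        )
    except asyncio.TimeoutError:
        return None                # the worker emits check_failed(...) at this point instead
    finally:
        loop.close()               # always close the loop so its resources are released

if __name__ == "__main__":
    print(run_check_in_worker_thread())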