mirror of https://github.com/langgenius/dify.git
Refactor: use logger = logging.getLogger(__name__) in logging (#24515)
Co-authored-by: Yongtao Huang <99629139+hyongtao-db@users.noreply.github.com>
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
parent 8af2ae973f
commit fa753239ad
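The change is the same in every touched module: calls on the root logging module are replaced with a module-level logger. A minimal sketch of the pattern, assuming nothing beyond the standard library (the helper function and message below are illustrative, not taken from a specific file in this diff):

import logging

# A module-level logger named after the module; records emitted here carry
# this module's import path, so log output can be configured and filtered
# per module instead of everything going through the root logger.
logger = logging.getLogger(__name__)


def run_migration() -> None:  # hypothetical helper, for illustration only
    try:
        raise RuntimeError("simulated failure")
    except Exception:
        # Before the refactor: logging.exception("Failed to execute database migration")
        logger.exception("Failed to execute database migration")


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    run_migration()

The hunks below are reconstructed from the scraped diff view: removed lines are prefixed with "-", added lines with "+", and surrounding context is left unprefixed.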
@@ -38,6 +38,8 @@ from services.plugin.data_migration import PluginDataMigration
from services.plugin.plugin_migration import PluginMigration
from tasks.remove_app_and_related_data_task import delete_draft_variables_batch

+logger = logging.getLogger(__name__)


@click.command("reset-password", help="Reset the account password.")
@click.option("--email", prompt=True, help="Account email to reset password for")
@@ -685,7 +687,7 @@ def upgrade_db():
click.echo(click.style("Database migration successful!", fg="green"))

except Exception:
-logging.exception("Failed to execute database migration")
+logger.exception("Failed to execute database migration")
finally:
lock.release()
else:
@@ -733,7 +735,7 @@ where sites.id is null limit 1000"""
except Exception:
failed_app_ids.append(app_id)
click.echo(click.style(f"Failed to fix missing site for app {app_id}", fg="red"))
-logging.exception("Failed to fix app related site missing issue, app_id: %s", app_id)
+logger.exception("Failed to fix app related site missing issue, app_id: %s", app_id)
continue

if not processed_count:
@@ -31,6 +31,8 @@ from services.errors.audio (
UnsupportedAudioTypeServiceError,
)

+logger = logging.getLogger(__name__)


class ChatMessageAudioApi(Resource):
@setup_required
@@ -49,7 +51,7 @@ class ChatMessageAudioApi(Resource):

return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -70,7 +72,7 @@ class ChatMessageAudioApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("Failed to handle post request to ChatMessageAudioApi")
+logger.exception("Failed to handle post request to ChatMessageAudioApi")
raise InternalServerError()


@@ -97,7 +99,7 @@ class ChatMessageTextApi(Resource):
)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -118,7 +120,7 @@ class ChatMessageTextApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("Failed to handle post request to ChatMessageTextApi")
+logger.exception("Failed to handle post request to ChatMessageTextApi")
raise InternalServerError()


@@ -160,7 +162,7 @@ class TextModesApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("Failed to handle get request to TextModesApi")
+logger.exception("Failed to handle get request to TextModesApi")
raise InternalServerError()

@@ -34,6 +34,8 @@ from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.llm import InvokeRateLimitError

+logger = logging.getLogger(__name__)


# define completion message api for user
class CompletionMessageApi(Resource):
@@ -67,7 +69,7 @@ class CompletionMessageApi(Resource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -80,7 +82,7 @@ class CompletionMessageApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -134,7 +136,7 @@ class ChatMessageApi(Resource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -149,7 +151,7 @@ class ChatMessageApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -33,6 +33,8 @@ from services.errors.conversation import ConversationNotExistsError
from services.errors.message import MessageNotExistsError, SuggestedQuestionsAfterAnswerDisabledError
from services.message_service import MessageService

+logger = logging.getLogger(__name__)


class ChatMessageListApi(Resource):
message_infinite_scroll_pagination_fields = {
@@ -215,7 +217,7 @@ class MessageSuggestedQuestionApi(Resource):
except SuggestedQuestionsAfterAnswerDisabledError:
raise AppSuggestedQuestionsAfterAnswerDisabledError()
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

return {"data": questions}
@@ -208,7 +208,7 @@ class AdvancedChatDraftWorkflowRunApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -244,7 +244,7 @@ class AdvancedChatDraftRunIterationNodeApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -280,7 +280,7 @@ class WorkflowDraftRunIterationNodeApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -317,7 +317,7 @@ class AdvancedChatDraftRunLoopNodeApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -354,7 +354,7 @@ class WorkflowDraftRunLoopNodeApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -13,6 +13,8 @@ from libs.oauth_data_source import NotionOAuth

from ..wraps import account_initialization_required, setup_required

+logger = logging.getLogger(__name__)


def get_oauth_providers():
with current_app.app_context():
@@ -80,7 +82,7 @@ class OAuthDataSourceBinding(Resource):
try:
oauth_provider.get_access_token(code)
except requests.exceptions.HTTPError as e:
-logging.exception(
+logger.exception(
"An error occurred during the OAuthCallback process with %s: %s", provider, e.response.text
)
return {"error": "OAuth data source process failed"}, 400
@@ -103,7 +105,7 @@ class OAuthDataSourceSync(Resource):
try:
oauth_provider.sync_data_source(binding_id)
except requests.exceptions.HTTPError as e:
-logging.exception(
+logger.exception(
"An error occurred during the OAuthCallback process with %s: %s", provider, e.response.text
)
return {"error": "OAuth data source process failed"}, 400
@@ -24,6 +24,8 @@ from services.feature_service import FeatureService

from .. import api

+logger = logging.getLogger(__name__)


def get_oauth_providers():
with current_app.app_context():
@@ -80,7 +82,7 @@ class OAuthCallback(Resource):
user_info = oauth_provider.get_user_info(token)
except requests.exceptions.RequestException as e:
error_text = e.response.text if e.response else str(e)
-logging.exception("An error occurred during the OAuth process with %s: %s", provider, error_text)
+logger.exception("An error occurred during the OAuth process with %s: %s", provider, error_text)
return {"error": "OAuth process failed"}, 400

if invite_token and RegisterService.is_valid_invite_token(invite_token):
@@ -54,6 +54,8 @@ from models import Dataset, DatasetProcessRule, Document, DocumentSegment, Uploa
from services.dataset_service import DatasetService, DocumentService
from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig

+logger = logging.getLogger(__name__)


class DocumentResource(Resource):
def get_document(self, dataset_id: str, document_id: str) -> Document:
@@ -966,7 +968,7 @@ class DocumentRetryApi(DocumentResource):
raise DocumentAlreadyFinishedError()
retry_documents.append(document)
except Exception:
-logging.exception("Failed to retry document, document id: %s", document_id)
+logger.exception("Failed to retry document, document id: %s", document_id)
continue
# retry document
DocumentService.retry_document(dataset_id, retry_documents)
@@ -23,6 +23,8 @@ from fields.hit_testing_fields import hit_testing_record_fields
from services.dataset_service import DatasetService
from services.hit_testing_service import HitTestingService

+logger = logging.getLogger(__name__)


class DatasetsHitTestingBase:
@staticmethod
@@ -81,5 +83,5 @@ class DatasetsHitTestingBase:
except ValueError as e:
raise ValueError(str(e))
except Exception as e:
-logging.exception("Hit testing failed.")
+logger.exception("Hit testing failed.")
raise InternalServerError(str(e))
@@ -26,6 +26,8 @@ from services.errors.audio (
UnsupportedAudioTypeServiceError,
)

+logger = logging.getLogger(__name__)


class ChatAudioApi(InstalledAppResource):
def post(self, installed_app):
@@ -38,7 +40,7 @@ class ChatAudioApi(InstalledAppResource):

return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -59,7 +61,7 @@ class ChatAudioApi(InstalledAppResource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -83,7 +85,7 @@ class ChatTextApi(InstalledAppResource):
response = AudioService.transcript_tts(app_model=app_model, text=text, voice=voice, message_id=message_id)
return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -104,5 +106,5 @@ class ChatTextApi(InstalledAppResource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()
@@ -32,6 +32,8 @@ from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.llm import InvokeRateLimitError

+logger = logging.getLogger(__name__)


# define completion api for user
class CompletionApi(InstalledAppResource):
@@ -65,7 +67,7 @@ class CompletionApi(InstalledAppResource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -78,7 +80,7 @@ class CompletionApi(InstalledAppResource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -125,7 +127,7 @@ class ChatApi(InstalledAppResource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -140,7 +142,7 @@ class ChatApi(InstalledAppResource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -35,6 +35,8 @@ from services.errors.message (
)
from services.message_service import MessageService

+logger = logging.getLogger(__name__)


class MessageListApi(InstalledAppResource):
@marshal_with(message_infinite_scroll_pagination_fields)
@@ -126,7 +128,7 @@ class MessageMoreLikeThisApi(InstalledAppResource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -158,7 +160,7 @@ class MessageSuggestedQuestionApi(InstalledAppResource):
except InvokeError as e:
raise CompletionRequestError(e.description)
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

return {"data": questions}
@@ -63,7 +63,7 @@ class InstalledAppWorkflowRunApi(InstalledAppResource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -9,6 +9,8 @@ from configs import dify_config

from . import api

+logger = logging.getLogger(__name__)


class VersionApi(Resource):
def get(self):
@@ -34,7 +36,7 @@ class VersionApi(Resource):
try:
response = requests.get(check_update_url, {"current_version": args.get("current_version")}, timeout=(3, 10))
except Exception as error:
-logging.warning("Check update version error: %s.", str(error))
+logger.warning("Check update version error: %s.", str(error))
result["version"] = args.get("current_version")
return result

@@ -55,7 +57,7 @@ def _has_new_version(*, latest_version: str, current_version: str) -> bool:
# Compare versions
return latest > current
except version.InvalidVersion:
-logging.warning("Invalid version format: latest=%s, current=%s", latest_version, current_version)
+logger.warning("Invalid version format: latest=%s, current=%s", latest_version, current_version)
return False

@@ -14,6 +14,8 @@ from libs.login import login_required
from services.model_load_balancing_service import ModelLoadBalancingService
from services.model_provider_service import ModelProviderService

+logger = logging.getLogger(__name__)


class DefaultModelApi(Resource):
@setup_required
@@ -73,7 +75,7 @@ class DefaultModelApi(Resource):
model=model_setting["model"],
)
except Exception as ex:
-logging.exception(
+logger.exception(
"Failed to update default model, model type: %s, model: %s",
model_setting["model_type"],
model_setting.get("model"),
@@ -278,7 +280,7 @@ class ModelProviderModelCredentialApi(Resource):
credential_name=args["name"],
)
except CredentialsValidateFailedError as ex:
-logging.exception(
+logger.exception(
"Failed to save model credentials, tenant_id: %s, model: %s, model_type: %s",
tenant_id,
args.get("model"),
@@ -31,6 +31,9 @@ from services.feature_service import FeatureService
from services.file_service import FileService
from services.workspace_service import WorkspaceService

+logger = logging.getLogger(__name__)


provider_fields = {
"provider_name": fields.String,
"provider_type": fields.String,
@@ -120,7 +123,7 @@ class TenantApi(Resource):
@marshal_with(tenant_fields)
def get(self):
if request.path == "/info":
-logging.warning("Deprecated URL /info was used.")
+logger.warning("Deprecated URL /info was used.")

tenant = current_user.current_tenant

@@ -29,6 +29,8 @@ from services.errors.audio (
UnsupportedAudioTypeServiceError,
)

+logger = logging.getLogger(__name__)


@service_api_ns.route("/audio-to-text")
class AudioApi(Resource):
@@ -57,7 +59,7 @@ class AudioApi(Resource):

return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -78,7 +80,7 @@ class AudioApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -121,7 +123,7 @@ class TextApi(Resource):

return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -142,5 +144,5 @@ class TextApi(Resource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()
@@ -33,6 +33,9 @@ from services.app_generate_service import AppGenerateService
from services.errors.app import IsDraftWorkflowError, WorkflowIdFormatError, WorkflowNotFoundError
from services.errors.llm import InvokeRateLimitError

+logger = logging.getLogger(__name__)


# Define parser for completion API
completion_parser = reqparse.RequestParser()
completion_parser.add_argument(
@@ -118,7 +121,7 @@ class CompletionApi(Resource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -131,7 +134,7 @@ class CompletionApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -209,7 +212,7 @@ class ChatApi(Resource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -224,7 +227,7 @@ class ChatApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -22,6 +22,9 @@ from services.errors.message (
)
from services.message_service import MessageService

+logger = logging.getLogger(__name__)


# Define parsers for message APIs
message_list_parser = reqparse.RequestParser()
message_list_parser.add_argument(
@@ -216,7 +219,7 @@ class MessageSuggestedApi(Resource):
except SuggestedQuestionsAfterAnswerDisabledError:
raise BadRequest("Suggested Questions Is Disabled.")
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

return {"result": "success", "data": questions}
@@ -174,7 +174,7 @@ class WorkflowRunApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -239,7 +239,7 @@ class WorkflowRunByIdApi(Resource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -16,6 +16,8 @@ from services.enterprise.enterprise_service import EnterpriseService
from services.feature_service import FeatureService
from services.webapp_auth_service import WebAppAuthService

+logger = logging.getLogger(__name__)


class AppParameterApi(WebApiResource):
"""Resource for app variables."""
@@ -92,7 +94,7 @@ class AppWebAuthPermission(Resource):
except Unauthorized:
raise
except Exception:
-logging.exception("Unexpected error during auth verification")
+logger.exception("Unexpected error during auth verification")
raise

features = FeatureService.get_system_features()
@@ -28,6 +28,8 @@ from services.errors.audio (
UnsupportedAudioTypeServiceError,
)

+logger = logging.getLogger(__name__)


class AudioApi(WebApiResource):
def post(self, app_model: App, end_user):
@@ -38,7 +40,7 @@ class AudioApi(WebApiResource):

return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -59,7 +61,7 @@ class AudioApi(WebApiResource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("Failed to handle post request to AudioApi")
+logger.exception("Failed to handle post request to AudioApi")
raise InternalServerError()


@@ -84,7 +86,7 @@ class TextApi(WebApiResource):

return response
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except NoAudioUploadedServiceError:
raise NoAudioUploadedError()
@@ -105,7 +107,7 @@ class TextApi(WebApiResource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("Failed to handle post request to TextApi")
+logger.exception("Failed to handle post request to TextApi")
raise InternalServerError()

@@ -31,6 +31,8 @@ from models.model import AppMode
from services.app_generate_service import AppGenerateService
from services.errors.llm import InvokeRateLimitError

+logger = logging.getLogger(__name__)


# define completion api for user
class CompletionApi(WebApiResource):
@@ -61,7 +63,7 @@ class CompletionApi(WebApiResource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -74,7 +76,7 @@ class CompletionApi(WebApiResource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -119,7 +121,7 @@ class ChatApi(WebApiResource):
except services.errors.conversation.ConversationCompletedError:
raise ConversationCompletedError()
except services.errors.app_model_config.AppModelConfigBrokenError:
-logging.exception("App model config broken.")
+logger.exception("App model config broken.")
raise AppUnavailableError()
except ProviderTokenNotInitError as ex:
raise ProviderNotInitializeError(ex.description)
@@ -134,7 +136,7 @@ class ChatApi(WebApiResource):
except ValueError as e:
raise e
except Exception as e:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -35,6 +35,8 @@ from services.errors.message (
)
from services.message_service import MessageService

+logger = logging.getLogger(__name__)


class MessageListApi(WebApiResource):
message_fields = {
@@ -145,7 +147,7 @@ class MessageMoreLikeThisApi(WebApiResource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()


@@ -176,7 +178,7 @@ class MessageSuggestedQuestionApi(WebApiResource):
except InvokeError as e:
raise CompletionRequestError(e.description)
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

return {"data": questions}
@@ -62,7 +62,7 @@ class WorkflowRunApi(WebApiResource):
except ValueError as e:
raise e
except Exception:
-logging.exception("internal server error.")
+logger.exception("internal server error.")
raise InternalServerError()

@@ -8,6 +8,8 @@ from core.app.entities.task_entities import AppBlockingResponse, AppStreamRespon
from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
from core.model_runtime.errors.invoke import InvokeError

+logger = logging.getLogger(__name__)


class AppGenerateResponseConverter(ABC):
_blocking_response_type: type[AppBlockingResponse]
@@ -120,7 +122,7 @@ class AppGenerateResponseConverter(ABC):
if data:
data.setdefault("message", getattr(e, "description", str(e)))
else:
-logging.error(e)
+logger.error(e)
data = {
"code": "internal_server_error",
"message": "Internal Server Error, please contact support.",
@@ -32,6 +32,8 @@ from extensions.ext_database import db
from models.model import AppMode, Conversation, MessageAnnotation, MessageFile
from services.annotation_service import AppAnnotationService

+logger = logging.getLogger(__name__)


class MessageCycleManager:
def __init__(
@@ -98,7 +100,7 @@ class MessageCycleManager:
conversation.name = name
except Exception as e:
if dify_config.DEBUG:
-logging.exception("generate conversation name failed, conversation_id: %s", conversation_id)
+logger.exception("generate conversation name failed, conversation_id: %s", conversation_id)
pass

db.session.merge(conversation)
@@ -10,6 +10,8 @@ from pydantic import BaseModel

from core.helper.position_helper import sort_to_dict_by_position_map

+logger = logging.getLogger(__name__)


class ExtensionModule(enum.Enum):
MODERATION = "moderation"
@@ -66,7 +68,7 @@ class Extensible:

# Check for extension module file
if (extension_name + ".py") not in file_names:
-logging.warning("Missing %s.py file in %s, Skip.", extension_name, subdir_path)
+logger.warning("Missing %s.py file in %s, Skip.", extension_name, subdir_path)
continue

# Check for builtin flag and position
@@ -95,7 +97,7 @@ class Extensible:
break

if not extension_class:
-logging.warning("Missing subclass of %s in %s, Skip.", cls.__name__, module_name)
+logger.warning("Missing subclass of %s in %s, Skip.", cls.__name__, module_name)
continue

# Load schema if not builtin
@@ -103,7 +105,7 @@ class Extensible:
if not builtin:
json_path = os.path.join(subdir_path, "schema.json")
if not os.path.exists(json_path):
-logging.warning("Missing schema.json file in %s, Skip.", subdir_path)
+logger.warning("Missing schema.json file in %s, Skip.", subdir_path)
continue

with open(json_path, encoding="utf-8") as f:
@@ -122,7 +124,7 @@ class Extensible:
)

except Exception as e:
-logging.exception("Error scanning extensions")
+logger.exception("Error scanning extensions")
raise

# Sort extensions by position
@@ -4,6 +4,8 @@ import sys
from types import ModuleType
from typing import AnyStr

+logger = logging.getLogger(__name__)


def import_module_from_source(*, module_name: str, py_file_path: AnyStr, use_lazy_loader: bool = False) -> ModuleType:
"""
@@ -30,7 +32,7 @@ def import_module_from_source(*, module_name: str, py_file_path: AnyStr, use_laz
spec.loader.exec_module(module)
return module
except Exception as e:
-logging.exception("Failed to load module %s from script file '%s'", module_name, repr(py_file_path))
+logger.exception("Failed to load module %s from script file '%s'", module_name, repr(py_file_path))
raise e

@@ -9,6 +9,8 @@ import httpx

from configs import dify_config

+logger = logging.getLogger(__name__)

SSRF_DEFAULT_MAX_RETRIES = dify_config.SSRF_DEFAULT_MAX_RETRIES

HTTP_REQUEST_NODE_SSL_VERIFY = True  # Default value for HTTP_REQUEST_NODE_SSL_VERIFY is True
@@ -73,12 +75,12 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
if response.status_code not in STATUS_FORCELIST:
return response
else:
-logging.warning(
+logger.warning(
"Received status code %s for URL %s which is in the force list", response.status_code, url
)

except httpx.RequestError as e:
-logging.warning("Request to URL %s failed on attempt %s: %s", url, retries + 1, e)
+logger.warning("Request to URL %s failed on attempt %s: %s", url, retries + 1, e)
if max_retries == 0:
raise

@@ -39,6 +39,8 @@ from models.dataset import Document as DatasetDocument
from models.model import UploadFile
from services.feature_service import FeatureService

+logger = logging.getLogger(__name__)


class IndexingRunner:
def __init__(self):
@@ -90,9 +92,9 @@ class IndexingRunner:
dataset_document.stopped_at = naive_utc_now()
db.session.commit()
except ObjectDeletedError:
-logging.warning("Document deleted, document id: %s", dataset_document.id)
+logger.warning("Document deleted, document id: %s", dataset_document.id)
except Exception as e:
-logging.exception("consume document failed")
+logger.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = naive_utc_now()
@@ -153,7 +155,7 @@ class IndexingRunner:
dataset_document.stopped_at = naive_utc_now()
db.session.commit()
except Exception as e:
-logging.exception("consume document failed")
+logger.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = naive_utc_now()
@@ -228,7 +230,7 @@ class IndexingRunner:
dataset_document.stopped_at = naive_utc_now()
db.session.commit()
except Exception as e:
-logging.exception("consume document failed")
+logger.exception("consume document failed")
dataset_document.indexing_status = "error"
dataset_document.error = str(e)
dataset_document.stopped_at = naive_utc_now()
@@ -321,7 +323,7 @@ class IndexingRunner:
try:
storage.delete(image_file.key)
except Exception:
-logging.exception(
+logger.exception(
"Delete image_files failed while indexing_estimate, \
image_upload_file_is: %s",
upload_file_id,
@@ -31,6 +31,8 @@ from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution
from core.workflow.graph_engine.entities.event import AgentLogEvent
from models import App, Message, WorkflowNodeExecutionModel, db

+logger = logging.getLogger(__name__)


class LLMGenerator:
@classmethod
@@ -68,7 +70,7 @@ class LLMGenerator:
result_dict = json.loads(cleaned_answer)
answer = result_dict["Your Output"]
except json.JSONDecodeError as e:
-logging.exception("Failed to generate name after answer, use query instead")
+logger.exception("Failed to generate name after answer, use query instead")
answer = query
name = answer.strip()

@@ -125,7 +127,7 @@ class LLMGenerator:
except InvokeError:
questions = []
except Exception:
-logging.exception("Failed to generate suggested questions after answer")
+logger.exception("Failed to generate suggested questions after answer")
questions = []

return questions
@@ -173,7 +175,7 @@ class LLMGenerator:
error = str(e)
error_step = "generate rule config"
except Exception as e:
-logging.exception("Failed to generate rule config, model: %s", model_config.get("name"))
+logger.exception("Failed to generate rule config, model: %s", model_config.get("name"))
rule_config["error"] = str(e)

rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
@@ -270,7 +272,7 @@ class LLMGenerator:
error_step = "generate conversation opener"

except Exception as e:
-logging.exception("Failed to generate rule config, model: %s", model_config.get("name"))
+logger.exception("Failed to generate rule config, model: %s", model_config.get("name"))
rule_config["error"] = str(e)

rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
@@ -319,7 +321,7 @@ class LLMGenerator:
error = str(e)
return {"code": "", "language": code_language, "error": f"Failed to generate code. Error: {error}"}
except Exception as e:
-logging.exception(
+logger.exception(
"Failed to invoke LLM model, model: %s, language: %s", model_config.get("name"), code_language
)
return {"code": "", "language": code_language, "error": f"An unexpected error occurred: {str(e)}"}
@@ -392,7 +394,7 @@ class LLMGenerator:
error = str(e)
return {"output": "", "error": f"Failed to generate JSON Schema. Error: {error}"}
except Exception as e:
-logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
+logger.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
return {"output": "", "error": f"An unexpected error occurred: {str(e)}"}

@staticmethod
@@ -570,5 +572,5 @@ class LLMGenerator:
error = str(e)
return {"error": f"Failed to generate code. Error: {error}"}
except Exception as e:
-logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"), exc_info=e)
+logger.exception("Failed to invoke LLM model, model: " + json.dumps(model_config.get("name")), exc_info=e)
return {"error": f"An unexpected error occurred: {str(e)}"}
@@ -152,7 +152,7 @@ class MCPClient:
# ExitStack will handle proper cleanup of all managed context managers
self._exit_stack.close()
except Exception as e:
-logging.exception("Error during cleanup")
+logger.exception("Error during cleanup")
raise ValueError(f"Error during cleanup: {e}")
finally:
self._session = None
@@ -31,6 +31,9 @@ from core.mcp.types (
SessionMessage,
)

+logger = logging.getLogger(__name__)


SendRequestT = TypeVar("SendRequestT", ClientRequest, ServerRequest)
SendResultT = TypeVar("SendResultT", ClientResult, ServerResult)
SendNotificationT = TypeVar("SendNotificationT", ClientNotification, ServerNotification)
@@ -366,7 +369,7 @@ class BaseSession(
self._handle_incoming(notification)
except Exception as e:
# For other validation errors, log and continue
-logging.warning("Failed to validate notification: %s. Message was: %s", e, message.message.root)
+logger.warning("Failed to validate notification: %s. Message was: %s", e, message.message.root)
else:  # Response or error
response_queue = self._response_streams.get(message.message.root.id)
if response_queue is not None:
@@ -376,7 +379,7 @@ class BaseSession(
except queue.Empty:
continue
except Exception:
-logging.exception("Error in message processing loop")
+logger.exception("Error in message processing loop")
raise

def _received_request(self, responder: RequestResponder[ReceiveRequestT, SendResultT]) -> None:
@@ -306,7 +306,7 @@ class AliyunDataTrace(BaseTraceInstance):
node_span = self.build_workflow_task_span(trace_id, workflow_span_id, trace_info, node_execution)
return node_span
except Exception as e:
-logging.debug("Error occurred in build_workflow_node_span: %s", e, exc_info=True)
+logger.debug("Error occurred in build_workflow_node_span: %s", e, exc_info=True)
return None

def get_workflow_node_status(self, node_execution: WorkflowNodeExecution) -> Status:
@@ -37,6 +37,8 @@ from models.model import App, AppModelConfig, Conversation, Message, MessageFile
from models.workflow import WorkflowAppLog, WorkflowRun
from tasks.ops_trace_task import process_trace_tasks

+logger = logging.getLogger(__name__)


class OpsTraceProviderConfigMap(dict[str, dict[str, Any]]):
def __getitem__(self, provider: str) -> dict[str, Any]:
@@ -287,7 +289,7 @@ class OpsTraceManager:
# create new tracing_instance and update the cache if it absent
tracing_instance = trace_instance(config_class(**decrypt_trace_config))
cls.ops_trace_instances_cache[decrypt_trace_config_key] = tracing_instance
-logging.info("new tracing_instance for app_id: %s", app_id)
+logger.info("new tracing_instance for app_id: %s", app_id)
return tracing_instance

@classmethod
@@ -849,7 +851,7 @@ class TraceQueueManager:
trace_task.app_id = self.app_id
trace_manager_queue.put(trace_task)
except Exception as e:
-logging.exception("Error adding trace task, trace_type %s", trace_task.trace_type)
+logger.exception("Error adding trace task, trace_type %s", trace_task.trace_type)
finally:
self.start_timer()

@@ -868,7 +870,7 @@ class TraceQueueManager:
if tasks:
self.send_to_celery(tasks)
except Exception as e:
-logging.exception("Error processing trace tasks")
+logger.exception("Error processing trace tasks")

def start_timer(self):
global trace_manager_timer
@@ -141,11 +141,11 @@ class BasePluginClient:
response.raise_for_status()
except HTTPError as e:
msg = f"Failed to request plugin daemon, status: {e.response.status_code}, url: {path}"
-logging.exception(msg)
+logger.exception(msg)
raise e
except Exception as e:
msg = f"Failed to request plugin daemon, url: {path}"
-logging.exception(msg)
+logger.exception(msg)
raise ValueError(msg) from e

try:
@@ -158,7 +158,7 @@ class BasePluginClient:
f"Failed to parse response from plugin daemon to PluginDaemonBasicResponse [{str(type.__name__)}],"
f" url: {path}"
)
-logging.exception(msg)
+logger.exception(msg)
raise ValueError(msg)

if rep.code != 0:
@@ -15,6 +15,8 @@ from core.rag.embedding.embedding_base import Embeddings
from core.rag.models.document import Document
from models.dataset import Dataset

+logger = logging.getLogger(__name__)


class MyScaleConfig(BaseModel):
host: str
@@ -53,7 +55,7 @@ class MyScaleVector(BaseVector):
return self.add_texts(documents=texts, embeddings=embeddings, **kwargs)

def _create_collection(self, dimension: int):
-logging.info("create MyScale collection %s with dimension %s", self._collection_name, dimension)
+logger.info("create MyScale collection %s with dimension %s", self._collection_name, dimension)
self._client.command(f"CREATE DATABASE IF NOT EXISTS {self._config.database}")
fts_params = f"('{self._config.fts_params}')" if self._config.fts_params else ""
sql = f"""
@@ -151,7 +153,7 @@ class MyScaleVector(BaseVector):
for r in self._client.query(sql).named_results()
]
except Exception as e:
-logging.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e))  # noqa:TRY401
+logger.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e))  # noqa:TRY401
return []

def delete(self) -> None:
@@ -19,6 +19,8 @@ from core.rag.models.document import Document
from extensions.ext_redis import redis_client
from models.dataset import Dataset

+logger = logging.getLogger(__name__)


class PGVectorConfig(BaseModel):
host: str
@@ -155,7 +157,7 @@ class PGVector(BaseVector):
cur.execute(f"DELETE FROM {self.table_name} WHERE id IN %s", (tuple(ids),))
except psycopg2.errors.UndefinedTable:
# table not exists
-logging.warning("Table %s not found, skipping delete operation.", self.table_name)
+logger.warning("Table %s not found, skipping delete operation.", self.table_name)
return
except Exception as e:
raise e
@@ -17,6 +17,8 @@ from core.rag.models.document import Document
from extensions.ext_redis import redis_client
from models import Dataset

+logger = logging.getLogger(__name__)


class TableStoreConfig(BaseModel):
access_key_id: Optional[str] = None
@@ -145,7 +147,7 @@ class TableStoreVector(BaseVector):
with redis_client.lock(lock_name, timeout=20):
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
if redis_client.get(collection_exist_cache_key):
-logging.info("Collection %s already exists.", self._collection_name)
+logger.info("Collection %s already exists.", self._collection_name)
return

self._create_table_if_not_exist()
@@ -155,7 +157,7 @@ class TableStoreVector(BaseVector):
def _create_table_if_not_exist(self) -> None:
table_list = self._tablestore_client.list_table()
if self._table_name in table_list:
-logging.info("Tablestore system table[%s] already exists", self._table_name)
+logger.info("Tablestore system table[%s] already exists", self._table_name)
return None

schema_of_primary_key = [("id", "STRING")]
@@ -163,12 +165,12 @@ class TableStoreVector(BaseVector):
table_options = tablestore.TableOptions()
reserved_throughput = tablestore.ReservedThroughput(tablestore.CapacityUnit(0, 0))
self._tablestore_client.create_table(table_meta, table_options, reserved_throughput)
-logging.info("Tablestore create table[%s] successfully.", self._table_name)
+logger.info("Tablestore create table[%s] successfully.", self._table_name)

def _create_search_index_if_not_exist(self, dimension: int) -> None:
search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name)
if self._index_name in [t[1] for t in search_index_list]:
-logging.info("Tablestore system index[%s] already exists", self._index_name)
+logger.info("Tablestore system index[%s] already exists", self._index_name)
return None

field_schemas = [
@@ -206,20 +208,20 @@ class TableStoreVector(BaseVector):

index_meta = tablestore.SearchIndexMeta(field_schemas)
self._tablestore_client.create_search_index(self._table_name, self._index_name, index_meta)
-logging.info("Tablestore create system index[%s] successfully.", self._index_name)
+logger.info("Tablestore create system index[%s] successfully.", self._index_name)

def _delete_table_if_exist(self):
search_index_list = self._tablestore_client.list_search_index(table_name=self._table_name)
for resp_tuple in search_index_list:
self._tablestore_client.delete_search_index(resp_tuple[0], resp_tuple[1])
-logging.info("Tablestore delete index[%s] successfully.", self._index_name)
+logger.info("Tablestore delete index[%s] successfully.", self._index_name)

self._tablestore_client.delete_table(self._table_name)
-logging.info("Tablestore delete system table[%s] successfully.", self._index_name)
+logger.info("Tablestore delete system table[%s] successfully.", self._index_name)

def _delete_search_index(self) -> None:
self._tablestore_client.delete_search_index(self._table_name, self._index_name)
-logging.info("Tablestore delete index[%s] successfully.", self._index_name)
+logger.info("Tablestore delete index[%s] successfully.", self._index_name)

def _write_row(self, primary_key: str, attributes: dict[str, Any]) -> None:
pk = [("id", primary_key)]
@@ -75,7 +75,7 @@ class CacheEmbedding(Embeddings):
except IntegrityError:
db.session.rollback()
except Exception:
-logging.exception("Failed transform embedding")
+logger.exception("Failed transform embedding")
cache_embeddings = []
try:
for i, n_embedding in zip(embedding_queue_indices, embedding_queue_embeddings):
@@ -122,7 +122,7 @@ class CacheEmbedding(Embeddings):
raise ValueError("Normalized embedding is nan please try again")
except Exception as ex:
if dify_config.DEBUG:
-logging.exception("Failed to embed query text '%s...(%s chars)'", text[:10], len(text))
+logger.exception("Failed to embed query text '%s...(%s chars)'", text[:10], len(text))
raise ex

try:
@@ -136,7 +136,7 @@ class CacheEmbedding(Embeddings):
redis_client.setex(embedding_cache_key, 600, encoded_str)
except Exception as ex:
if dify_config.DEBUG:
-logging.exception(
+logger.exception(
"Failed to add embedding to redis for the text '%s...(%s chars)'", text[:10], len(text)
)
raise ex
@@ -23,6 +23,8 @@ from libs import helper
from models.dataset import Dataset
from services.entities.knowledge_entities.knowledge_entities import Rule

+logger = logging.getLogger(__name__)


class QAIndexProcessor(BaseIndexProcessor):
def extract(self, extract_setting: ExtractSetting, **kwargs) -> list[Document]:
@@ -182,7 +184,7 @@ class QAIndexProcessor(BaseIndexProcessor):
qa_documents.append(qa_document)
format_documents.extend(qa_documents)
except Exception as e:
-logging.exception("Failed to format qa document")
+logger.exception("Failed to format qa document")

all_qa_documents.extend(format_documents)

@@ -11,6 +11,8 @@ from extensions.ext_database import db
from libs.datetime_utils import naive_utc_now
from models.dataset import Document

+logger = logging.getLogger(__name__)


@document_index_created.connect
def handle(sender, **kwargs):
@@ -19,7 +21,7 @@ def handle(sender, **kwargs):
documents = []
start_at = time.perf_counter()
for document_id in document_ids:
-logging.info(click.style(f"Start process document: {document_id}", fg="green"))
+logger.info(click.style(f"Start process document: {document_id}", fg="green"))

document = (
db.session.query(Document)
@@ -44,6 +46,6 @@ def handle(sender, **kwargs):
indexing_runner = IndexingRunner()
indexing_runner.run(documents)
end_at = time.perf_counter()
-logging.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
except DocumentIsPausedError as ex:
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
@@ -6,6 +6,8 @@ from flask import Flask
from configs import dify_config
from dify_app import DifyApp

+logger = logging.getLogger(__name__)


class Mail:
def __init__(self):
@@ -18,7 +20,7 @@ class Mail:
def init_app(self, app: Flask):
mail_type = dify_config.MAIL_TYPE
if not mail_type:
-logging.warning("MAIL_TYPE is not set")
+logger.warning("MAIL_TYPE is not set")
return

if dify_config.MAIL_DEFAULT_SEND_FROM:
@@ -16,6 +16,8 @@ from dify_app import DifyApp
from libs.helper import extract_tenant_id
from models import Account, EndUser

+logger = logging.getLogger(__name__)


@user_logged_in.connect
@user_loaded_from_request.connect
@@ -33,7 +35,7 @@ def on_user_loaded(_sender, user: Union["Account", "EndUser"]):
current_span.set_attribute("service.tenant.id", tenant_id)
current_span.set_attribute("service.user.id", user.id)
except Exception:
-logging.exception("Error setting tenant and user attributes")
+logger.exception("Error setting tenant and user attributes")
pass


@@ -74,12 +76,12 @@ def init_app(app: DifyApp):
attributes[SpanAttributes.HTTP_METHOD] = str(request.method)
_http_response_counter.add(1, attributes)
except Exception:
-logging.exception("Error setting status and attributes")
+logger.exception("Error setting status and attributes")
pass

instrumentor = FlaskInstrumentor()
if dify_config.DEBUG:
-logging.info("Initializing Flask instrumentor")
+logger.info("Initializing Flask instrumentor")
instrumentor.instrument_app(app, response_hook=response_hook)

def init_sqlalchemy_instrumentor(app: DifyApp):
@@ -253,5 +255,5 @@ def init_celery_worker(*args, **kwargs):
tracer_provider = get_tracer_provider()
metric_provider = get_meter_provider()
if dify_config.DEBUG:
-logging.info("Initializing OpenTelemetry for Celery worker")
+logger.info("Initializing OpenTelemetry for Celery worker")
CeleryInstrumentor(tracer_provider=tracer_provider, meter_provider=metric_provider).instrument()
@@ -8,7 +8,7 @@ from flask.signals import request_finished, request_started
 
 from configs import dify_config
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 def _is_content_type_json(content_type: str) -> bool:
@@ -20,20 +20,20 @@ def _is_content_type_json(content_type: str) -> bool:
 
 def _log_request_started(_sender, **_extra):
     """Log the start of a request."""
-    if not _logger.isEnabledFor(logging.DEBUG):
+    if not logger.isEnabledFor(logging.DEBUG):
         return
 
     request = flask.request
     if not (_is_content_type_json(request.content_type) and request.data):
-        _logger.debug("Received Request %s -> %s", request.method, request.path)
+        logger.debug("Received Request %s -> %s", request.method, request.path)
         return
     try:
         json_data = json.loads(request.data)
     except (TypeError, ValueError):
-        _logger.exception("Failed to parse JSON request")
+        logger.exception("Failed to parse JSON request")
         return
     formatted_json = json.dumps(json_data, ensure_ascii=False, indent=2)
-    _logger.debug(
+    logger.debug(
         "Received Request %s -> %s, Request Body:\n%s",
         request.method,
         request.path,
@@ -43,21 +43,21 @@ def _log_request_started(_sender, **_extra):
 
 def _log_request_finished(_sender, response, **_extra):
     """Log the end of a request."""
-    if not _logger.isEnabledFor(logging.DEBUG) or response is None:
+    if not logger.isEnabledFor(logging.DEBUG) or response is None:
         return
 
     if not _is_content_type_json(response.content_type):
-        _logger.debug("Response %s %s", response.status, response.content_type)
+        logger.debug("Response %s %s", response.status, response.content_type)
         return
 
     response_data = response.get_data(as_text=True)
     try:
         json_data = json.loads(response_data)
     except (TypeError, ValueError):
-        _logger.exception("Failed to parse JSON response")
+        logger.exception("Failed to parse JSON response")
         return
     formatted_json = json.dumps(json_data, ensure_ascii=False, indent=2)
-    _logger.debug(
+    logger.debug(
         "Response %s %s, Response Body:\n%s",
         response.status,
         response.content_type,
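The request-logging hooks above guard their work with logger.isEnabledFor(logging.DEBUG), so the JSON parse and pretty-print are skipped entirely when debug logging is disabled. A small sketch of the same guard; the log_payload helper is illustrative and not part of the diff:

    import json
    import logging

    logger = logging.getLogger(__name__)


    def log_payload(raw: bytes) -> None:
        # avoid paying for json.loads/json.dumps unless DEBUG is actually enabled
        if not logger.isEnabledFor(logging.DEBUG):
            return
        logger.debug("Request Body:\n%s", json.dumps(json.loads(raw), ensure_ascii=False, indent=2))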
@@ -27,6 +27,8 @@ if TYPE_CHECKING:
     from models.account import Account
     from models.model import EndUser
 
+logger = logging.getLogger(__name__)
+
 
 def extract_tenant_id(user: Union["Account", "EndUser"]) -> str | None:
     """
@@ -321,7 +323,7 @@ class TokenManager:
         key = cls._get_token_key(token, token_type)
         token_data_json = redis_client.get(key)
         if token_data_json is None:
-            logging.warning("%s token %s not found with key %s", token_type, token, key)
+            logger.warning("%s token %s not found with key %s", token_type, token, key)
             return None
         token_data: Optional[dict[str, Any]] = json.loads(token_data_json)
         return token_data
@@ -4,6 +4,8 @@ import sendgrid # type: ignore
 from python_http_client.exceptions import ForbiddenError, UnauthorizedError
 from sendgrid.helpers.mail import Content, Email, Mail, To # type: ignore
 
+logger = logging.getLogger(__name__)
+
 
 class SendGridClient:
     def __init__(self, sendgrid_api_key: str, _from: str):
@@ -11,7 +13,7 @@ class SendGridClient:
         self._from = _from
 
     def send(self, mail: dict):
-        logging.debug("Sending email with SendGrid")
+        logger.debug("Sending email with SendGrid")
 
         try:
             _to = mail["to"]
@@ -27,19 +29,19 @@ class SendGridClient:
             mail = Mail(from_email, to_email, subject, content)
             mail_json = mail.get() # type: ignore
             response = sg.client.mail.send.post(request_body=mail_json)
-            logging.debug(response.status_code)
-            logging.debug(response.body)
-            logging.debug(response.headers)
+            logger.debug(response.status_code)
+            logger.debug(response.body)
+            logger.debug(response.headers)
 
         except TimeoutError as e:
-            logging.exception("SendGridClient Timeout occurred while sending email")
+            logger.exception("SendGridClient Timeout occurred while sending email")
             raise
         except (UnauthorizedError, ForbiddenError) as e:
-            logging.exception(
+            logger.exception(
                 "SendGridClient Authentication failed. "
                 "Verify that your credentials and the 'from' email address are correct"
             )
             raise
         except Exception as e:
-            logging.exception("SendGridClient Unexpected error occurred while sending email to %s", _to)
+            logger.exception("SendGridClient Unexpected error occurred while sending email to %s", _to)
             raise
@@ -3,6 +3,8 @@ import smtplib
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
 
+logger = logging.getLogger(__name__)
+
 
 class SMTPClient:
     def __init__(
@@ -44,13 +46,13 @@ class SMTPClient:
 
             smtp.sendmail(self._from, mail["to"], msg.as_string())
         except smtplib.SMTPException as e:
-            logging.exception("SMTP error occurred")
+            logger.exception("SMTP error occurred")
             raise
        except TimeoutError as e:
-            logging.exception("Timeout occurred while sending email")
+            logger.exception("Timeout occurred while sending email")
             raise
         except Exception as e:
-            logging.exception("Unexpected error occurred while sending email to %s", mail["to"])
+            logger.exception("Unexpected error occurred while sending email to %s", mail["to"])
             raise
         finally:
             if smtp:
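Both mail clients above report failures with logger.exception inside the except block, which logs at ERROR level and appends the traceback of the exception currently being handled before re-raising. A minimal sketch under the same structure; deliver is a placeholder transport, not a function from the diff:

    import logging

    logger = logging.getLogger(__name__)


    def deliver(mail: dict) -> None:
        # placeholder transport; a real client would hand off to SMTP or an HTTP API
        raise TimeoutError("simulated timeout")


    def send(mail: dict) -> None:
        try:
            deliver(mail)
        except TimeoutError:
            # exception() == error() plus the active traceback
            logger.exception("Timeout occurred while sending email")
            raise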
@@ -29,6 +29,8 @@ from .engine import db
 from .model import App, Tag, TagBinding, UploadFile
 from .types import StringUUID
 
+logger = logging.getLogger(__name__)
+
 
 class DatasetPermissionEnum(enum.StrEnum):
     ONLY_ME = "only_me"
@@ -914,7 +916,7 @@ class DatasetKeywordTable(Base):
                 return json.loads(keyword_table_text.decode("utf-8"), cls=SetDecoder)
             return None
         except Exception as e:
-            logging.exception("Failed to load keyword table from file: %s", file_key)
+            logger.exception("Failed to load keyword table from file: %s", file_key)
             return None
 
 
@@ -38,7 +38,7 @@ from .engine import db
 from .enums import CreatorUserRole, DraftVariableType
 from .types import EnumText, StringUUID
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 class WorkflowType(Enum):
@@ -1055,7 +1055,7 @@ class WorkflowDraftVariable(Base):
     def get_selector(self) -> list[str]:
         selector = json.loads(self.selector)
         if not isinstance(selector, list):
-            _logger.error(
+            logger.error(
                 "invalid selector loaded from database, type=%s, value=%s",
                 type(selector),
                 self.selector,
@@ -21,7 +21,7 @@ from models.model import (
 from models.web import SavedMessage
 from services.feature_service import FeatureService
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 @app.celery.task(queue="dataset")
@@ -50,7 +50,7 @@ def clean_messages():
             plan_sandbox_clean_message_day = message.created_at
             app = db.session.query(App).filter_by(id=message.app_id).first()
             if not app:
-                _logger.warning(
+                logger.warning(
                     "Expected App record to exist, but none was found, app_id=%s, message_id=%s",
                     message.app_id,
                     message.id,
@@ -19,7 +19,7 @@ from models.model import (
 )
 from models.workflow import ConversationVariable, WorkflowAppLog, WorkflowNodeExecutionModel, WorkflowRun
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 MAX_RETRIES = 3
@@ -39,9 +39,9 @@ def clean_workflow_runlogs_precise():
     try:
         total_workflow_runs = db.session.query(WorkflowRun).where(WorkflowRun.created_at < cutoff_date).count()
         if total_workflow_runs == 0:
-            _logger.info("No expired workflow run logs found")
+            logger.info("No expired workflow run logs found")
             return
-        _logger.info("Found %s expired workflow run logs to clean", total_workflow_runs)
+        logger.info("Found %s expired workflow run logs to clean", total_workflow_runs)
 
         total_deleted = 0
         failed_batches = 0
@@ -66,20 +66,20 @@ def clean_workflow_runlogs_precise():
             else:
                 failed_batches += 1
                 if failed_batches >= MAX_RETRIES:
-                    _logger.error("Failed to delete batch after %s retries, aborting cleanup for today", MAX_RETRIES)
+                    logger.error("Failed to delete batch after %s retries, aborting cleanup for today", MAX_RETRIES)
                     break
                 else:
                     # Calculate incremental delay times: 5, 10, 15 minutes
                     retry_delay_minutes = failed_batches * 5
-                    _logger.warning("Batch deletion failed, retrying in %s minutes...", retry_delay_minutes)
+                    logger.warning("Batch deletion failed, retrying in %s minutes...", retry_delay_minutes)
                     time.sleep(retry_delay_minutes * 60)
                     continue
 
-        _logger.info("Cleanup completed: %s expired workflow run logs deleted", total_deleted)
+        logger.info("Cleanup completed: %s expired workflow run logs deleted", total_deleted)
 
     except Exception as e:
         db.session.rollback()
-        _logger.exception("Unexpected error in workflow log cleanup")
+        logger.exception("Unexpected error in workflow log cleanup")
         raise
 
     end_at = time.perf_counter()
@@ -151,5 +151,5 @@ def _delete_batch_with_retry(workflow_run_ids: list[str], attempt_count: int) ->
 
     except Exception as e:
         db.session.rollback()
-        _logger.exception("Batch deletion failed (attempt %s)", attempt_count + 1)
+        logger.exception("Batch deletion failed (attempt %s)", attempt_count + 1)
         return False
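Note that the cleanup job above passes values as logger arguments ("Found %s expired workflow run logs to clean", total_workflow_runs) rather than pre-formatting the message; %-style arguments are only interpolated if the record is actually emitted, whereas the click.style(f"...") calls elsewhere in this commit build the string up front. A small illustrative comparison, with a made-up count:

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)
    count = 42

    # deferred: the message is formatted only when the record is emitted
    logger.info("Found %s expired workflow run logs to clean", count)

    # eager: the f-string is built even if the level were disabled
    logger.info(f"Found {count} expired workflow run logs to clean")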
@@ -13,6 +13,8 @@ from models.account import Account, Tenant, TenantAccountJoin
 from models.dataset import Dataset, DatasetAutoDisableLog
 from services.feature_service import FeatureService
 
+logger = logging.getLogger(__name__)
+
 
 @app.celery.task(queue="dataset")
 def mail_clean_document_notify_task():
@@ -24,7 +26,7 @@ def mail_clean_document_notify_task():
     if not mail.is_inited():
         return
 
-    logging.info(click.style("Start send document clean notify mail", fg="green"))
+    logger.info(click.style("Start send document clean notify mail", fg="green"))
     start_at = time.perf_counter()
 
     # send document clean notify mail
@@ -89,8 +91,6 @@ def mail_clean_document_notify_task():
             dataset_auto_disable_log.notified = True
         db.session.commit()
         end_at = time.perf_counter()
-        logging.info(
-            click.style(f"Send document clean notify mail succeeded: latency: {end_at - start_at}", fg="green")
-        )
+        logger.info(click.style(f"Send document clean notify mail succeeded: latency: {end_at - start_at}", fg="green"))
     except Exception:
-        logging.exception("Send document clean notify mail failed")
+        logger.exception("Send document clean notify mail failed")
@@ -18,6 +18,8 @@ celery_redis = Redis(
     db=int(redis_config.get("virtual_host")) if redis_config.get("virtual_host") else 1,
 )
 
+logger = logging.getLogger(__name__)
+
 
 @app.celery.task(queue="monitor")
 def queue_monitor_task():
@@ -25,24 +27,24 @@ def queue_monitor_task():
     threshold = dify_config.QUEUE_MONITOR_THRESHOLD
 
     if threshold is None:
-        logging.warning(click.style("QUEUE_MONITOR_THRESHOLD is not configured, skipping monitoring", fg="yellow"))
+        logger.warning(click.style("QUEUE_MONITOR_THRESHOLD is not configured, skipping monitoring", fg="yellow"))
         return
 
     try:
         queue_length = celery_redis.llen(f"{queue_name}")
-        logging.info(click.style(f"Start monitor {queue_name}", fg="green"))
+        logger.info(click.style(f"Start monitor {queue_name}", fg="green"))
 
         if queue_length is None:
-            logging.error(
+            logger.error(
                 click.style(f"Failed to get queue length for {queue_name} - Redis may be unavailable", fg="red")
             )
             return
 
-        logging.info(click.style(f"Queue length: {queue_length}", fg="green"))
+        logger.info(click.style(f"Queue length: {queue_length}", fg="green"))
 
         if queue_length >= threshold:
             warning_msg = f"Queue {queue_name} task count exceeded the limit.: {queue_length}/{threshold}"
-            logging.warning(click.style(warning_msg, fg="red"))
+            logger.warning(click.style(warning_msg, fg="red"))
             alter_emails = dify_config.QUEUE_MONITOR_ALERT_EMAILS
             if alter_emails:
                 to_list = alter_emails.split(",")
@@ -62,10 +64,10 @@ def queue_monitor_task():
                     },
                 )
             except Exception as e:
-                logging.exception(click.style("Exception occurred during sending email", fg="red"))
+                logger.exception(click.style("Exception occurred during sending email", fg="red"))
 
     except Exception as e:
-        logging.exception(click.style("Exception occurred during queue monitoring", fg="red"))
+        logger.exception(click.style("Exception occurred during queue monitoring", fg="red"))
     finally:
         if db.session.is_active:
             db.session.close()
@@ -67,6 +67,8 @@ from tasks.mail_owner_transfer_task import (
 )
 from tasks.mail_reset_password_task import send_reset_password_mail_task
 
+logger = logging.getLogger(__name__)
+
 
 class TokenPair(BaseModel):
     access_token: str
@@ -332,9 +334,9 @@ class AccountService:
             db.session.add(account_integrate)
 
             db.session.commit()
-            logging.info("Account %s linked %s account %s.", account.id, provider, open_id)
+            logger.info("Account %s linked %s account %s.", account.id, provider, open_id)
         except Exception as e:
-            logging.exception("Failed to link %s account %s to Account %s", provider, open_id, account.id)
+            logger.exception("Failed to link %s account %s to Account %s", provider, open_id, account.id)
             raise LinkAccountIntegrateError("Failed to link account.") from e
 
     @staticmethod
@@ -925,7 +927,7 @@ class TenantService:
         """Create tenant member"""
         if role == TenantAccountRole.OWNER.value:
            if TenantService.has_roles(tenant, [TenantAccountRole.OWNER]):
-                logging.error("Tenant %s has already an owner.", tenant.id)
+                logger.error("Tenant %s has already an owner.", tenant.id)
                 raise Exception("Tenant already has an owner.")
 
         ta = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first()
@@ -1177,7 +1179,7 @@ class RegisterService:
             db.session.query(Tenant).delete()
             db.session.commit()
 
-            logging.exception("Setup account failed, email: %s, name: %s", email, name)
+            logger.exception("Setup account failed, email: %s, name: %s", email, name)
             raise ValueError(f"Setup failed: {e}")
 
     @classmethod
@@ -1222,15 +1224,15 @@ class RegisterService:
             db.session.commit()
         except WorkSpaceNotAllowedCreateError:
             db.session.rollback()
-            logging.exception("Register failed")
+            logger.exception("Register failed")
             raise AccountRegisterError("Workspace is not allowed to create.")
         except AccountRegisterError as are:
             db.session.rollback()
-            logging.exception("Register failed")
+            logger.exception("Register failed")
             raise are
         except Exception as e:
             db.session.rollback()
-            logging.exception("Register failed")
+            logger.exception("Register failed")
             raise AccountRegisterError(f"Registration failed: {e}") from e
 
         return account
@@ -25,6 +25,8 @@ from services.feature_service import FeatureService
 from services.tag_service import TagService
 from tasks.remove_app_and_related_data_task import remove_app_and_related_data_task
 
+logger = logging.getLogger(__name__)
+
 
 class AppService:
     def get_paginate_apps(self, user_id: str, tenant_id: str, args: dict) -> Pagination | None:
@@ -95,7 +97,7 @@ class AppService:
             except (ProviderTokenNotInitError, LLMBadRequestError):
                 model_instance = None
             except Exception as e:
-                logging.exception("Get default model instance failed, tenant_id: %s", tenant_id)
+                logger.exception("Get default model instance failed, tenant_id: %s", tenant_id)
                 model_instance = None
 
             if model_instance:
@@ -76,6 +76,8 @@ from tasks.remove_document_from_index_task import remove_document_from_index_tas
 from tasks.retry_document_indexing_task import retry_document_indexing_task
 from tasks.sync_website_document_indexing_task import sync_website_document_indexing_task
 
+logger = logging.getLogger(__name__)
+
 
 class DatasetService:
     @staticmethod
@@ -615,7 +617,7 @@ class DatasetService:
                 )
             except ProviderTokenNotInitError:
                 # If we can't get the embedding model, preserve existing settings
-                logging.warning(
+                logger.warning(
                     "Failed to initialize embedding model %s/%s, preserving existing settings",
                     data["embedding_model_provider"],
                     data["embedding_model"],
@@ -661,11 +663,11 @@ class DatasetService:
     @staticmethod
     def check_dataset_permission(dataset, user):
         if dataset.tenant_id != user.current_tenant_id:
-            logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
+            logger.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
             raise NoPermissionError("You do not have permission to access this dataset.")
         if user.current_role != TenantAccountRole.OWNER:
             if dataset.permission == DatasetPermissionEnum.ONLY_ME and dataset.created_by != user.id:
-                logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
+                logger.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
                 raise NoPermissionError("You do not have permission to access this dataset.")
             if dataset.permission == DatasetPermissionEnum.PARTIAL_TEAM:
                 # For partial team permission, user needs explicit permission or be the creator
@@ -674,7 +676,7 @@ class DatasetService:
                     db.session.query(DatasetPermission).filter_by(dataset_id=dataset.id, account_id=user.id).first()
                 )
                 if not user_permission:
-                    logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
+                    logger.debug("User %s does not have permission to access dataset %s", user.id, dataset.id)
                     raise NoPermissionError("You do not have permission to access this dataset.")
 
     @staticmethod
@@ -1190,7 +1192,7 @@ class DocumentService:
                     created_by=account.id,
                 )
             else:
-                logging.warning(
+                logger.warning(
                     "Invalid process rule mode: %s, can not find dataset process rule",
                     process_rule.mode,
                 )
@@ -1882,7 +1884,7 @@ class DocumentService:
                 task_func.delay(*task_args)
             except Exception as e:
                 # Log the error but do not rollback the transaction
-                logging.exception("Error executing async task for document %s", update_info["document"].id)
+                logger.exception("Error executing async task for document %s", update_info["document"].id)
                 # don't raise the error immediately, but capture it for later
                 propagation_error = e
             try:
@@ -1893,7 +1895,7 @@ class DocumentService:
                 redis_client.setex(indexing_cache_key, 600, 1)
             except Exception as e:
                 # Log the error but do not rollback the transaction
-                logging.exception("Error setting cache for document %s", update_info["document"].id)
+                logger.exception("Error setting cache for document %s", update_info["document"].id)
         # Raise any propagation error after all updates
         if propagation_error:
             raise propagation_error
@@ -2059,7 +2061,7 @@ class SegmentService:
         try:
             VectorService.create_segments_vector([args["keywords"]], [segment_document], dataset, document.doc_form)
         except Exception as e:
-            logging.exception("create segment index failed")
+            logger.exception("create segment index failed")
             segment_document.enabled = False
             segment_document.disabled_at = naive_utc_now()
             segment_document.status = "error"
@@ -2142,7 +2144,7 @@ class SegmentService:
             # save vector index
             VectorService.create_segments_vector(keywords_list, pre_segment_data_list, dataset, document.doc_form)
         except Exception as e:
-            logging.exception("create segment index failed")
+            logger.exception("create segment index failed")
             for segment_document in segment_data_list:
                 segment_document.enabled = False
                 segment_document.disabled_at = naive_utc_now()
@@ -2314,7 +2316,7 @@ class SegmentService:
                 VectorService.update_segment_vector(args.keywords, segment, dataset)
 
         except Exception as e:
-            logging.exception("update segment index failed")
+            logger.exception("update segment index failed")
             segment.enabled = False
             segment.disabled_at = naive_utc_now()
             segment.status = "error"
@@ -2476,7 +2478,7 @@ class SegmentService:
         try:
             VectorService.create_child_chunk_vector(child_chunk, dataset)
         except Exception as e:
-            logging.exception("create child chunk index failed")
+            logger.exception("create child chunk index failed")
             db.session.rollback()
             raise ChildChunkIndexingError(str(e))
         db.session.commit()
@@ -2551,7 +2553,7 @@ class SegmentService:
             VectorService.update_child_chunk_vector(new_child_chunks, update_child_chunks, delete_child_chunks, dataset)
             db.session.commit()
         except Exception as e:
-            logging.exception("update child chunk index failed")
+            logger.exception("update child chunk index failed")
             db.session.rollback()
             raise ChildChunkIndexingError(str(e))
         return sorted(new_child_chunks + update_child_chunks, key=lambda x: x.position)
@@ -2575,7 +2577,7 @@ class SegmentService:
             VectorService.update_child_chunk_vector([], [child_chunk], [], dataset)
             db.session.commit()
         except Exception as e:
-            logging.exception("update child chunk index failed")
+            logger.exception("update child chunk index failed")
             db.session.rollback()
             raise ChildChunkIndexingError(str(e))
         return child_chunk
@@ -2586,7 +2588,7 @@ class SegmentService:
         try:
             VectorService.delete_child_chunk_vector(child_chunk, dataset)
         except Exception as e:
-            logging.exception("delete child chunk index failed")
+            logger.exception("delete child chunk index failed")
             db.session.rollback()
             raise ChildChunkDeleteIndexError(str(e))
         db.session.commit()
@@ -12,6 +12,8 @@ from extensions.ext_database import db
 from models.account import Account
 from models.dataset import Dataset, DatasetQuery
 
+logger = logging.getLogger(__name__)
+
 default_retrieval_model = {
     "search_method": RetrievalMethod.SEMANTIC_SEARCH.value,
     "reranking_enable": False,
@@ -77,7 +79,7 @@ class HitTestingService:
         )
 
         end = time.perf_counter()
-        logging.debug("Hit testing retrieve in %s seconds", end - start)
+        logger.debug("Hit testing retrieve in %s seconds", end - start)
 
         dataset_query = DatasetQuery(
             dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id
@@ -113,7 +115,7 @@ class HitTestingService:
         )
 
         end = time.perf_counter()
-        logging.debug("External knowledge hit testing retrieve in %s seconds", end - start)
+        logger.debug("External knowledge hit testing retrieve in %s seconds", end - start)
 
         dataset_query = DatasetQuery(
             dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id
@@ -15,6 +15,8 @@ from services.entities.knowledge_entities.knowledge_entities import (
     MetadataOperationData,
 )
 
+logger = logging.getLogger(__name__)
+
 
 class MetadataService:
     @staticmethod
@@ -90,7 +92,7 @@ class MetadataService:
             db.session.commit()
             return metadata # type: ignore
         except Exception:
-            logging.exception("Update metadata name failed")
+            logger.exception("Update metadata name failed")
         finally:
             redis_client.delete(lock_key)
 
@@ -122,7 +124,7 @@ class MetadataService:
             db.session.commit()
             return metadata
         except Exception:
-            logging.exception("Delete metadata failed")
+            logger.exception("Delete metadata failed")
         finally:
             redis_client.delete(lock_key)
 
@@ -161,7 +163,7 @@ class MetadataService:
             dataset.built_in_field_enabled = True
             db.session.commit()
         except Exception:
-            logging.exception("Enable built-in field failed")
+            logger.exception("Enable built-in field failed")
         finally:
             redis_client.delete(lock_key)
 
@@ -192,7 +194,7 @@ class MetadataService:
            dataset.built_in_field_enabled = False
             db.session.commit()
         except Exception:
-            logging.exception("Disable built-in field failed")
+            logger.exception("Disable built-in field failed")
         finally:
             redis_client.delete(lock_key)
 
@@ -230,7 +232,7 @@ class MetadataService:
                 db.session.add(dataset_metadata_binding)
             db.session.commit()
         except Exception:
-            logging.exception("Update documents metadata failed")
+            logger.exception("Update documents metadata failed")
         finally:
             redis_client.delete(lock_key)
 
@@ -13,7 +13,7 @@ from models.dataset import ChildChunk, Dataset, DatasetProcessRule, DocumentSegm
 from models.dataset import Document as DatasetDocument
 from services.entities.knowledge_entities.knowledge_entities import ParentMode
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 class VectorService:
@@ -27,7 +27,7 @@ class VectorService:
         if doc_form == IndexType.PARENT_CHILD_INDEX:
             dataset_document = db.session.query(DatasetDocument).filter_by(id=segment.document_id).first()
             if not dataset_document:
-                _logger.warning(
+                logger.warning(
                     "Expected DatasetDocument record to exist, but none was found, document_id=%s, segment_id=%s",
                     segment.document_id,
                     segment.id,
@@ -28,7 +28,7 @@ from models.enums import DraftVariableType
 from models.workflow import Workflow, WorkflowDraftVariable, is_system_variable_editable
 from repositories.factory import DifyAPIRepositoryFactory
 
-_logger = logging.getLogger(__name__)
+logger = logging.getLogger(__name__)
 
 
 @dataclasses.dataclass(frozen=True)
@@ -242,7 +242,7 @@ class WorkflowDraftVariableService:
         if conv_var is None:
             self._session.delete(instance=variable)
             self._session.flush()
-            _logger.warning(
+            logger.warning(
                 "Conversation variable not found for draft variable, id=%s, name=%s", variable.id, variable.name
             )
             return None
@@ -263,12 +263,12 @@ class WorkflowDraftVariableService:
         if variable.node_execution_id is None:
             self._session.delete(instance=variable)
             self._session.flush()
-            _logger.warning("draft variable has no node_execution_id, id=%s, name=%s", variable.id, variable.name)
+            logger.warning("draft variable has no node_execution_id, id=%s, name=%s", variable.id, variable.name)
             return None
 
         node_exec = self._api_node_execution_repo.get_execution_by_id(variable.node_execution_id)
         if node_exec is None:
-            _logger.warning(
+            logger.warning(
                 "Node exectution not found for draft variable, id=%s, name=%s, node_execution_id=%s",
                 variable.id,
                 variable.name,
@@ -351,7 +351,7 @@ class WorkflowDraftVariableService:
             return None
         segment = draft_var.get_value()
         if not isinstance(segment, StringSegment):
-            _logger.warning(
+            logger.warning(
                 "sys.conversation_id variable is not a string: app_id=%s, id=%s",
                 app_id,
                 draft_var.id,
@@ -681,7 +681,7 @@ class DraftVariableSaver:
         draft_vars = []
         for name, value in output.items():
             if not self._should_variable_be_saved(name):
-                _logger.debug(
+                logger.debug(
                     "Skip saving variable as it has been excluded by its node_type, name=%s, node_type=%s",
                     name,
                     self._node_type,
@@ -13,6 +13,8 @@ from libs.datetime_utils import naive_utc_now
 from models.dataset import DatasetAutoDisableLog, DocumentSegment
 from models.dataset import Document as DatasetDocument
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def add_document_to_index_task(dataset_document_id: str):
@@ -22,12 +24,12 @@ def add_document_to_index_task(dataset_document_id: str):
 
     Usage: add_document_to_index_task.delay(dataset_document_id)
     """
-    logging.info(click.style(f"Start add document to index: {dataset_document_id}", fg="green"))
+    logger.info(click.style(f"Start add document to index: {dataset_document_id}", fg="green"))
     start_at = time.perf_counter()
 
     dataset_document = db.session.query(DatasetDocument).where(DatasetDocument.id == dataset_document_id).first()
     if not dataset_document:
-        logging.info(click.style(f"Document not found: {dataset_document_id}", fg="red"))
+        logger.info(click.style(f"Document not found: {dataset_document_id}", fg="red"))
         db.session.close()
         return
 
@@ -101,11 +103,11 @@ def add_document_to_index_task(dataset_document_id: str):
         db.session.commit()
 
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(f"Document added to index: {dataset_document.id} latency: {end_at - start_at}", fg="green")
         )
     except Exception as e:
-        logging.exception("add document to index failed")
+        logger.exception("add document to index failed")
         dataset_document.enabled = False
         dataset_document.disabled_at = naive_utc_now()
         dataset_document.indexing_status = "error"
@@ -10,6 +10,8 @@ from extensions.ext_database import db
 from models.dataset import Dataset
 from services.dataset_service import DatasetCollectionBindingService
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def add_annotation_to_index_task(
@@ -25,7 +27,7 @@ def add_annotation_to_index_task(
 
     Usage: clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct)
     """
-    logging.info(click.style(f"Start build index for annotation: {annotation_id}", fg="green"))
+    logger.info(click.style(f"Start build index for annotation: {annotation_id}", fg="green"))
     start_at = time.perf_counter()
 
     try:
@@ -48,13 +50,13 @@ def add_annotation_to_index_task(
         vector.create([document], duplicate_check=True)
 
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(
                 f"Build index successful for annotation: {annotation_id} latency: {end_at - start_at}",
                 fg="green",
             )
         )
     except Exception:
-        logging.exception("Build index for annotation failed")
+        logger.exception("Build index for annotation failed")
     finally:
         db.session.close()
@@ -13,6 +13,8 @@ from models.dataset import Dataset
 from models.model import App, AppAnnotationSetting, MessageAnnotation
 from services.dataset_service import DatasetCollectionBindingService
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id: str, tenant_id: str, user_id: str):
@@ -25,7 +27,7 @@ def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id:
     :param user_id: user_id
 
     """
-    logging.info(click.style(f"Start batch import annotation: {job_id}", fg="green"))
+    logger.info(click.style(f"Start batch import annotation: {job_id}", fg="green"))
     start_at = time.perf_counter()
     indexing_cache_key = f"app_annotation_batch_import_{str(job_id)}"
     # get app info
@@ -74,7 +76,7 @@ def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id:
             db.session.commit()
             redis_client.setex(indexing_cache_key, 600, "completed")
             end_at = time.perf_counter()
-            logging.info(
+            logger.info(
                 click.style(
                     "Build index successful for batch import annotation: {} latency: {}".format(
                         job_id, end_at - start_at
@@ -87,6 +89,6 @@ def batch_import_annotations_task(job_id: str, content_list: list[dict], app_id:
         redis_client.setex(indexing_cache_key, 600, "error")
         indexing_error_msg_key = f"app_annotation_batch_import_error_msg_{str(job_id)}"
         redis_client.setex(indexing_error_msg_key, 600, str(e))
-        logging.exception("Build index for batch import annotations failed")
+        logger.exception("Build index for batch import annotations failed")
     finally:
         db.session.close()
@@ -9,13 +9,15 @@ from extensions.ext_database import db
 from models.dataset import Dataset
 from services.dataset_service import DatasetCollectionBindingService
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def delete_annotation_index_task(annotation_id: str, app_id: str, tenant_id: str, collection_binding_id: str):
     """
     Async delete annotation index task
     """
-    logging.info(click.style(f"Start delete app annotation index: {app_id}", fg="green"))
+    logger.info(click.style(f"Start delete app annotation index: {app_id}", fg="green"))
     start_at = time.perf_counter()
     try:
         dataset_collection_binding = DatasetCollectionBindingService.get_dataset_collection_binding_by_id_and_type(
@@ -33,10 +35,10 @@ def delete_annotation_index_task(annotation_id: str, app_id: str, tenant_id: str
             vector = Vector(dataset, attributes=["doc_id", "annotation_id", "app_id"])
             vector.delete_by_metadata_field("annotation_id", annotation_id)
         except Exception:
-            logging.exception("Delete annotation index failed when annotation deleted.")
+            logger.exception("Delete annotation index failed when annotation deleted.")
         end_at = time.perf_counter()
-        logging.info(click.style(f"App annotations index deleted : {app_id} latency: {end_at - start_at}", fg="green"))
+        logger.info(click.style(f"App annotations index deleted : {app_id} latency: {end_at - start_at}", fg="green"))
     except Exception as e:
-        logging.exception("Annotation deleted index failed")
+        logger.exception("Annotation deleted index failed")
     finally:
         db.session.close()
@@ -10,26 +10,28 @@ from extensions.ext_redis import redis_client
 from models.dataset import Dataset
 from models.model import App, AppAnnotationSetting, MessageAnnotation
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def disable_annotation_reply_task(job_id: str, app_id: str, tenant_id: str):
     """
     Async enable annotation reply task
     """
-    logging.info(click.style(f"Start delete app annotations index: {app_id}", fg="green"))
+    logger.info(click.style(f"Start delete app annotations index: {app_id}", fg="green"))
     start_at = time.perf_counter()
     # get app info
     app = db.session.query(App).where(App.id == app_id, App.tenant_id == tenant_id, App.status == "normal").first()
     annotations_count = db.session.query(MessageAnnotation).where(MessageAnnotation.app_id == app_id).count()
     if not app:
-        logging.info(click.style(f"App not found: {app_id}", fg="red"))
+        logger.info(click.style(f"App not found: {app_id}", fg="red"))
         db.session.close()
         return
 
     app_annotation_setting = db.session.query(AppAnnotationSetting).where(AppAnnotationSetting.app_id == app_id).first()
 
     if not app_annotation_setting:
-        logging.info(click.style(f"App annotation setting not found: {app_id}", fg="red"))
+        logger.info(click.style(f"App annotation setting not found: {app_id}", fg="red"))
         db.session.close()
         return
 
@@ -49,7 +51,7 @@ def disable_annotation_reply_task(job_id: str, app_id: str, tenant_id: str):
             vector = Vector(dataset, attributes=["doc_id", "annotation_id", "app_id"])
             vector.delete()
         except Exception:
-            logging.exception("Delete annotation index failed when annotation deleted.")
+            logger.exception("Delete annotation index failed when annotation deleted.")
         redis_client.setex(disable_app_annotation_job_key, 600, "completed")
 
         # delete annotation setting
@@ -57,9 +59,9 @@ def disable_annotation_reply_task(job_id: str, app_id: str, tenant_id: str):
         db.session.commit()
 
         end_at = time.perf_counter()
-        logging.info(click.style(f"App annotations index deleted : {app_id} latency: {end_at - start_at}", fg="green"))
+        logger.info(click.style(f"App annotations index deleted : {app_id} latency: {end_at - start_at}", fg="green"))
     except Exception as e:
-        logging.exception("Annotation batch deleted index failed")
+        logger.exception("Annotation batch deleted index failed")
         redis_client.setex(disable_app_annotation_job_key, 600, "error")
         disable_app_annotation_error_key = f"disable_app_annotation_error_{str(job_id)}"
         redis_client.setex(disable_app_annotation_error_key, 600, str(e))
@@ -13,6 +13,8 @@ from models.dataset import Dataset
 from models.model import App, AppAnnotationSetting, MessageAnnotation
 from services.dataset_service import DatasetCollectionBindingService
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def enable_annotation_reply_task(
@@ -27,13 +29,13 @@ def enable_annotation_reply_task(
     """
     Async enable annotation reply task
     """
-    logging.info(click.style(f"Start add app annotation to index: {app_id}", fg="green"))
+    logger.info(click.style(f"Start add app annotation to index: {app_id}", fg="green"))
     start_at = time.perf_counter()
     # get app info
     app = db.session.query(App).where(App.id == app_id, App.tenant_id == tenant_id, App.status == "normal").first()
 
     if not app:
-        logging.info(click.style(f"App not found: {app_id}", fg="red"))
+        logger.info(click.style(f"App not found: {app_id}", fg="red"))
         db.session.close()
         return
 
@@ -68,7 +70,7 @@ def enable_annotation_reply_task(
             try:
                 old_vector.delete()
             except Exception as e:
-                logging.info(click.style(f"Delete annotation index error: {str(e)}", fg="red"))
+                logger.info(click.style(f"Delete annotation index error: {str(e)}", fg="red"))
             annotation_setting.score_threshold = score_threshold
             annotation_setting.collection_binding_id = dataset_collection_binding.id
             annotation_setting.updated_user_id = user_id
@@ -104,14 +106,14 @@ def enable_annotation_reply_task(
             try:
                 vector.delete_by_metadata_field("app_id", app_id)
             except Exception as e:
-                logging.info(click.style(f"Delete annotation index error: {str(e)}", fg="red"))
+                logger.info(click.style(f"Delete annotation index error: {str(e)}", fg="red"))
             vector.create(documents)
         db.session.commit()
         redis_client.setex(enable_app_annotation_job_key, 600, "completed")
         end_at = time.perf_counter()
-        logging.info(click.style(f"App annotations added to index: {app_id} latency: {end_at - start_at}", fg="green"))
+        logger.info(click.style(f"App annotations added to index: {app_id} latency: {end_at - start_at}", fg="green"))
     except Exception as e:
-        logging.exception("Annotation batch created index failed")
+        logger.exception("Annotation batch created index failed")
         redis_client.setex(enable_app_annotation_job_key, 600, "error")
         enable_app_annotation_error_key = f"enable_app_annotation_error_{str(job_id)}"
         redis_client.setex(enable_app_annotation_error_key, 600, str(e))
@@ -10,6 +10,8 @@ from extensions.ext_database import db
 from models.dataset import Dataset
 from services.dataset_service import DatasetCollectionBindingService
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def update_annotation_to_index_task(
@@ -25,7 +27,7 @@ def update_annotation_to_index_task(
 
     Usage: clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct)
     """
-    logging.info(click.style(f"Start update index for annotation: {annotation_id}", fg="green"))
+    logger.info(click.style(f"Start update index for annotation: {annotation_id}", fg="green"))
     start_at = time.perf_counter()
 
     try:
@@ -49,13 +51,13 @@ def update_annotation_to_index_task(
         vector.delete_by_metadata_field("annotation_id", annotation_id)
         vector.add_texts([document])
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(
                 f"Build index successful for annotation: {annotation_id} latency: {end_at - start_at}",
                 fg="green",
             )
         )
     except Exception:
-        logging.exception("Build index for annotation failed")
+        logger.exception("Build index for annotation failed")
     finally:
         db.session.close()
@@ -11,6 +11,8 @@ from extensions.ext_storage import storage
 from models.dataset import Dataset, DocumentSegment
 from models.model import UploadFile
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def batch_clean_document_task(document_ids: list[str], dataset_id: str, doc_form: str, file_ids: list[str]):
@@ -23,7 +25,7 @@ def batch_clean_document_task(document_ids: list[str], dataset_id: str, doc_form
 
     Usage: batch_clean_document_task.delay(document_ids, dataset_id)
     """
-    logging.info(click.style("Start batch clean documents when documents deleted", fg="green"))
+    logger.info(click.style("Start batch clean documents when documents deleted", fg="green"))
     start_at = time.perf_counter()
 
     try:
@@ -47,7 +49,7 @@ def batch_clean_document_task(document_ids: list[str], dataset_id: str, doc_form
                     if image_file and image_file.key:
                         storage.delete(image_file.key)
                 except Exception:
-                    logging.exception(
+                    logger.exception(
                         "Delete image_files failed when storage deleted, \
                          image_upload_file_is: %s",
                         upload_file_id,
@@ -62,18 +64,18 @@ def batch_clean_document_task(document_ids: list[str], dataset_id: str, doc_form
                 try:
                     storage.delete(file.key)
                 except Exception:
-                    logging.exception("Delete file failed when document deleted, file_id: %s", file.id)
+                    logger.exception("Delete file failed when document deleted, file_id: %s", file.id)
                 db.session.delete(file)
             db.session.commit()
 
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(
                 f"Cleaned documents when documents deleted latency: {end_at - start_at}",
                 fg="green",
             )
         )
     except Exception:
-        logging.exception("Cleaned documents when documents deleted failed")
+        logger.exception("Cleaned documents when documents deleted failed")
     finally:
         db.session.close()
@@ -21,6 +21,8 @@ from models.dataset import Dataset, Document, DocumentSegment
 from models.model import UploadFile
 from services.vector_service import VectorService
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def batch_create_segment_to_index_task(
@@ -42,7 +44,7 @@ def batch_create_segment_to_index_task(
 
     Usage: batch_create_segment_to_index_task.delay(job_id, upload_file_id, dataset_id, document_id, tenant_id, user_id)
     """
-    logging.info(click.style(f"Start batch create segment jobId: {job_id}", fg="green"))
+    logger.info(click.style(f"Start batch create segment jobId: {job_id}", fg="green"))
     start_at = time.perf_counter()
 
     indexing_cache_key = f"segment_batch_import_{job_id}"
@@ -142,14 +144,14 @@ def batch_create_segment_to_index_task(
         db.session.commit()
         redis_client.setex(indexing_cache_key, 600, "completed")
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(
                 f"Segment batch created job: {job_id} latency: {end_at - start_at}",
                 fg="green",
             )
        )
     except Exception:
-        logging.exception("Segments batch created index failed")
+        logger.exception("Segments batch created index failed")
         redis_client.setex(indexing_cache_key, 600, "error")
     finally:
         db.session.close()
@@ -20,6 +20,8 @@ from models.dataset import (
 )
 from models.model import UploadFile
 
+logger = logging.getLogger(__name__)
+
 
 # Add import statement for ValueError
 @shared_task(queue="dataset")
@@ -42,7 +44,7 @@ def clean_dataset_task(
 
     Usage: clean_dataset_task.delay(dataset_id, tenant_id, indexing_technique, index_struct)
     """
-    logging.info(click.style(f"Start clean dataset when dataset deleted: {dataset_id}", fg="green"))
+    logger.info(click.style(f"Start clean dataset when dataset deleted: {dataset_id}", fg="green"))
     start_at = time.perf_counter()
 
     try:
@@ -63,7 +65,7 @@ def clean_dataset_task(
             from core.rag.index_processor.constant.index_type import IndexType
 
             doc_form = IndexType.PARAGRAPH_INDEX
-            logging.info(
+            logger.info(
                 click.style(f"Invalid doc_form detected, using default index type for cleanup: {doc_form}", fg="yellow")
             )
 
@@ -72,18 +74,18 @@ def clean_dataset_task(
         try:
             index_processor = IndexProcessorFactory(doc_form).init_index_processor()
             index_processor.clean(dataset, None, with_keywords=True, delete_child_chunks=True)
-            logging.info(click.style(f"Successfully cleaned vector database for dataset: {dataset_id}", fg="green"))
+            logger.info(click.style(f"Successfully cleaned vector database for dataset: {dataset_id}", fg="green"))
         except Exception as index_cleanup_error:
-            logging.exception(click.style(f"Failed to clean vector database for dataset {dataset_id}", fg="red"))
+            logger.exception(click.style(f"Failed to clean vector database for dataset {dataset_id}", fg="red"))
             # Continue with document and segment deletion even if vector cleanup fails
-            logging.info(
+            logger.info(
                 click.style(f"Continuing with document and segment deletion for dataset: {dataset_id}", fg="yellow")
             )
 
         if documents is None or len(documents) == 0:
-            logging.info(click.style(f"No documents found for dataset: {dataset_id}", fg="green"))
+            logger.info(click.style(f"No documents found for dataset: {dataset_id}", fg="green"))
         else:
-            logging.info(click.style(f"Cleaning documents for dataset: {dataset_id}", fg="green"))
+            logger.info(click.style(f"Cleaning documents for dataset: {dataset_id}", fg="green"))
 
             for document in documents:
                 db.session.delete(document)
@@ -97,7 +99,7 @@ def clean_dataset_task(
                     try:
                         storage.delete(image_file.key)
                     except Exception:
-                        logging.exception(
+                        logger.exception(
                             "Delete image_files failed when storage deleted, \
                              image_upload_file_is: %s",
                             upload_file_id,
@@ -134,7 +136,7 @@ def clean_dataset_task(
 
         db.session.commit()
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(f"Cleaned dataset when dataset deleted: {dataset_id} latency: {end_at - start_at}", fg="green")
         )
     except Exception:
@@ -142,10 +144,10 @@ def clean_dataset_task(
         # This ensures the database session is properly cleaned up
         try:
            db.session.rollback()
-            logging.info(click.style(f"Rolled back database session for dataset: {dataset_id}", fg="yellow"))
+            logger.info(click.style(f"Rolled back database session for dataset: {dataset_id}", fg="yellow"))
         except Exception as rollback_error:
-            logging.exception("Failed to rollback database session")
+            logger.exception("Failed to rollback database session")
 
-        logging.exception("Cleaned dataset when dataset deleted failed")
+        logger.exception("Cleaned dataset when dataset deleted failed")
     finally:
         db.session.close()
@@ -12,6 +12,8 @@ from extensions.ext_storage import storage
 from models.dataset import Dataset, DatasetMetadataBinding, DocumentSegment
 from models.model import UploadFile
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def clean_document_task(document_id: str, dataset_id: str, doc_form: str, file_id: Optional[str]):
@@ -24,7 +26,7 @@ def clean_document_task(document_id: str, dataset_id: str, doc_form: str, file_i
 
     Usage: clean_document_task.delay(document_id, dataset_id)
     """
-    logging.info(click.style(f"Start clean document when document deleted: {document_id}", fg="green"))
+    logger.info(click.style(f"Start clean document when document deleted: {document_id}", fg="green"))
     start_at = time.perf_counter()
 
     try:
@@ -49,7 +51,7 @@ def clean_document_task(document_id: str, dataset_id: str, doc_form: str, file_i
                 try:
                     storage.delete(image_file.key)
                 except Exception:
-                    logging.exception(
+                    logger.exception(
                         "Delete image_files failed when storage deleted, \
                          image_upload_file_is: %s",
                         upload_file_id,
@@ -64,7 +66,7 @@ def clean_document_task(document_id: str, dataset_id: str, doc_form: str, file_i
             try:
                 storage.delete(file.key)
             except Exception:
-                logging.exception("Delete file failed when document deleted, file_id: %s", file_id)
+                logger.exception("Delete file failed when document deleted, file_id: %s", file_id)
             db.session.delete(file)
             db.session.commit()
 
@@ -76,13 +78,13 @@ def clean_document_task(document_id: str, dataset_id: str, doc_form: str, file_i
         db.session.commit()
 
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
            click.style(
                 f"Cleaned document when document deleted: {document_id} latency: {end_at - start_at}",
                 fg="green",
             )
         )
     except Exception:
-        logging.exception("Cleaned document when document deleted failed")
+        logger.exception("Cleaned document when document deleted failed")
     finally:
         db.session.close()
@@ -8,6 +8,8 @@ from core.rag.index_processor.index_processor_factory import IndexProcessorFacto
 from extensions.ext_database import db
 from models.dataset import Dataset, Document, DocumentSegment
 
+logger = logging.getLogger(__name__)
+
 
 @shared_task(queue="dataset")
 def clean_notion_document_task(document_ids: list[str], dataset_id: str):
@@ -18,9 +20,7 @@ def clean_notion_document_task(document_ids: list[str], dataset_id: str):
 
     Usage: clean_notion_document_task.delay(document_ids, dataset_id)
     """
-    logging.info(
-        click.style(f"Start clean document when import form notion document deleted: {dataset_id}", fg="green")
-    )
+    logger.info(click.style(f"Start clean document when import form notion document deleted: {dataset_id}", fg="green"))
     start_at = time.perf_counter()
 
     try:
@@ -43,7 +43,7 @@ def clean_notion_document_task(document_ids: list[str], dataset_id: str):
             db.session.delete(segment)
         db.session.commit()
         end_at = time.perf_counter()
-        logging.info(
+        logger.info(
             click.style(
                 "Clean document when import form notion document deleted end :: {} latency: {}".format(
                     dataset_id, end_at - start_at
@@ -52,6 +52,6 @@ def clean_notion_document_task(document_ids: list[str], dataset_id: str):
             )
         )
     except Exception:
-        logging.exception("Cleaned document when import form notion document deleted failed")
+        logger.exception("Cleaned document when import form notion document deleted failed")
     finally:
         db.session.close()
@ -12,6 +12,8 @@ from extensions.ext_redis import redis_client
|
|||
from libs.datetime_utils import naive_utc_now
|
||||
from models.dataset import DocumentSegment
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@shared_task(queue="dataset")
|
||||
def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]] = None):
|
||||
|
|
@ -21,12 +23,12 @@ def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]]
|
|||
:param keywords:
|
||||
Usage: create_segment_to_index_task.delay(segment_id)
|
||||
"""
|
||||
logging.info(click.style(f"Start create segment to index: {segment_id}", fg="green"))
|
||||
logger.info(click.style(f"Start create segment to index: {segment_id}", fg="green"))
|
||||
start_at = time.perf_counter()
|
||||
|
||||
segment = db.session.query(DocumentSegment).where(DocumentSegment.id == segment_id).first()
|
||||
if not segment:
|
||||
logging.info(click.style(f"Segment not found: {segment_id}", fg="red"))
|
||||
logger.info(click.style(f"Segment not found: {segment_id}", fg="red"))
|
||||
db.session.close()
|
||||
return
|
||||
|
||||
|
|
@ -58,17 +60,17 @@ def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]]
|
|||
dataset = segment.dataset
|
||||
|
||||
if not dataset:
|
||||
logging.info(click.style(f"Segment {segment.id} has no dataset, pass.", fg="cyan"))
|
||||
logger.info(click.style(f"Segment {segment.id} has no dataset, pass.", fg="cyan"))
|
||||
return
|
||||
|
||||
dataset_document = segment.document
|
||||
|
||||
if not dataset_document:
|
||||
logging.info(click.style(f"Segment {segment.id} has no document, pass.", fg="cyan"))
|
||||
logger.info(click.style(f"Segment {segment.id} has no document, pass.", fg="cyan"))
|
||||
return
|
||||
|
||||
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != "completed":
|
||||
logging.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan"))
|
||||
logger.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan"))
|
||||
return
|
||||
|
||||
index_type = dataset.doc_form
|
||||
|
|
@ -85,9 +87,9 @@ def create_segment_to_index_task(segment_id: str, keywords: Optional[list[str]]
|
|||
db.session.commit()
|
||||
|
||||
end_at = time.perf_counter()
|
||||
logging.info(click.style(f"Segment created to index: {segment.id} latency: {end_at - start_at}", fg="green"))
|
||||
logger.info(click.style(f"Segment created to index: {segment.id} latency: {end_at - start_at}", fg="green"))
|
||||
except Exception as e:
|
||||
logging.exception("create segment to index failed")
|
||||
logger.exception("create segment to index failed")
|
||||
segment.enabled = False
|
||||
segment.disabled_at = naive_utc_now()
|
||||
segment.status = "error"
|
||||
|
|
|
|||
|
|
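The same mechanical change repeats across every task module in this commit. As a reading aid, here is a minimal, hedged sketch of the pattern being applied (the task name and body below are hypothetical, not taken from the diff): a module-level logger created with logging.getLogger(__name__) replaces direct calls on the root logging module.

```python
import logging

import click
from celery import shared_task

# Module-level logger named after the importing module, e.g. "tasks.create_segment_to_index_task".
logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def example_task(item_id: str):  # hypothetical task, for illustration only
    # Records now carry the module name, so they can be filtered and configured per module.
    logger.info(click.style(f"Start processing item: {item_id}", fg="green"))
    try:
        ...  # the actual work would go here
    except Exception:
        # logger.exception logs at ERROR level and appends the active traceback.
        logger.exception("Processing item %s failed", item_id)
```

Behavior is unchanged; only the logger the records are routed through differs.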
@@ -12,6 +12,8 @@ from extensions.ext_database import db
from models.dataset import Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def deal_dataset_vector_index_task(dataset_id: str, action: Literal["remove", "add", "update"]):

@@ -21,7 +23,7 @@ def deal_dataset_vector_index_task(dataset_id: str, action: Literal["remove", "a
:param action: action
Usage: deal_dataset_vector_index_task.delay(dataset_id, action)
"""
-logging.info(click.style(f"Start deal dataset vector index: {dataset_id}", fg="green"))
+logger.info(click.style(f"Start deal dataset vector index: {dataset_id}", fg="green"))
start_at = time.perf_counter()

try:

@@ -163,8 +165,8 @@ def deal_dataset_vector_index_task(dataset_id: str, action: Literal["remove", "a
index_processor.clean(dataset, None, with_keywords=False, delete_child_chunks=False)

end_at = time.perf_counter()
-logging.info(click.style(f"Deal dataset vector index: {dataset_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Deal dataset vector index: {dataset_id} latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("Deal dataset vector index failed")
+logger.exception("Deal dataset vector index failed")
finally:
db.session.close()


@@ -10,6 +10,8 @@ from models.model import Message, MessageAnnotation, MessageFeedback
from models.tools import ToolConversationVariables, ToolFile
from models.web import PinnedConversation

+logger = logging.getLogger(__name__)


@shared_task(queue="conversation")
def delete_conversation_related_data(conversation_id: str) -> None:

@@ -20,7 +22,7 @@ def delete_conversation_related_data(conversation_id: str) -> None:
conversation_id: conversation Id
"""

-logging.info(
+logger.info(
click.style(f"Starting to delete conversation data from db for conversation_id {conversation_id}", fg="green")
)
start_at = time.perf_counter()

@@ -53,7 +55,7 @@ def delete_conversation_related_data(conversation_id: str) -> None:
db.session.commit()

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
f"Succeeded cleaning data from db for conversation_id {conversation_id} latency: {end_at - start_at}",
fg="green",

@@ -61,7 +63,7 @@ def delete_conversation_related_data(conversation_id: str) -> None:
)

except Exception as e:
-logging.exception("Failed to delete data from db for conversation_id: %s failed", conversation_id)
+logger.exception("Failed to delete data from db for conversation_id: %s failed", conversation_id)
db.session.rollback()
raise e
finally:

@@ -8,6 +8,8 @@ from core.rag.index_processor.index_processor_factory import IndexProcessorFacto
from extensions.ext_database import db
from models.dataset import Dataset, Document

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def delete_segment_from_index_task(index_node_ids: list, dataset_id: str, document_id: str):

@@ -19,7 +21,7 @@ def delete_segment_from_index_task(index_node_ids: list, dataset_id: str, docume

Usage: delete_segment_from_index_task.delay(index_node_ids, dataset_id, document_id)
"""
-logging.info(click.style("Start delete segment from index", fg="green"))
+logger.info(click.style("Start delete segment from index", fg="green"))
start_at = time.perf_counter()
try:
dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()

@@ -38,8 +40,8 @@ def delete_segment_from_index_task(index_node_ids: list, dataset_id: str, docume
index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=True)

end_at = time.perf_counter()
-logging.info(click.style(f"Segment deleted from index latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Segment deleted from index latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("delete segment from index failed")
+logger.exception("delete segment from index failed")
finally:
db.session.close()

@@ -9,6 +9,8 @@ from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.dataset import DocumentSegment

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def disable_segment_from_index_task(segment_id: str):

@@ -18,17 +20,17 @@ def disable_segment_from_index_task(segment_id: str):

Usage: disable_segment_from_index_task.delay(segment_id)
"""
-logging.info(click.style(f"Start disable segment from index: {segment_id}", fg="green"))
+logger.info(click.style(f"Start disable segment from index: {segment_id}", fg="green"))
start_at = time.perf_counter()

segment = db.session.query(DocumentSegment).where(DocumentSegment.id == segment_id).first()
if not segment:
-logging.info(click.style(f"Segment not found: {segment_id}", fg="red"))
+logger.info(click.style(f"Segment not found: {segment_id}", fg="red"))
db.session.close()
return

if segment.status != "completed":
-logging.info(click.style(f"Segment is not completed, disable is not allowed: {segment_id}", fg="red"))
+logger.info(click.style(f"Segment is not completed, disable is not allowed: {segment_id}", fg="red"))
db.session.close()
return

@@ -38,17 +40,17 @@ def disable_segment_from_index_task(segment_id: str):
dataset = segment.dataset

if not dataset:
-logging.info(click.style(f"Segment {segment.id} has no dataset, pass.", fg="cyan"))
+logger.info(click.style(f"Segment {segment.id} has no dataset, pass.", fg="cyan"))
return

dataset_document = segment.document

if not dataset_document:
-logging.info(click.style(f"Segment {segment.id} has no document, pass.", fg="cyan"))
+logger.info(click.style(f"Segment {segment.id} has no document, pass.", fg="cyan"))
return

if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != "completed":
-logging.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan"))
+logger.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan"))
return

index_type = dataset_document.doc_form

@@ -56,9 +58,9 @@ def disable_segment_from_index_task(segment_id: str):
index_processor.clean(dataset, [segment.index_node_id])

end_at = time.perf_counter()
-logging.info(click.style(f"Segment removed from index: {segment.id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Segment removed from index: {segment.id} latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("remove segment from index failed")
+logger.exception("remove segment from index failed")
segment.enabled = True
db.session.commit()
finally:

@@ -10,6 +10,8 @@ from extensions.ext_redis import redis_client
from models.dataset import Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def disable_segments_from_index_task(segment_ids: list, dataset_id: str, document_id: str):

@@ -25,18 +27,18 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen

dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
if not dataset:
-logging.info(click.style(f"Dataset {dataset_id} not found, pass.", fg="cyan"))
+logger.info(click.style(f"Dataset {dataset_id} not found, pass.", fg="cyan"))
db.session.close()
return

dataset_document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first()

if not dataset_document:
-logging.info(click.style(f"Document {document_id} not found, pass.", fg="cyan"))
+logger.info(click.style(f"Document {document_id} not found, pass.", fg="cyan"))
db.session.close()
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != "completed":
-logging.info(click.style(f"Document {document_id} status is invalid, pass.", fg="cyan"))
+logger.info(click.style(f"Document {document_id} status is invalid, pass.", fg="cyan"))
db.session.close()
return
# sync index processor

@@ -61,7 +63,7 @@ def disable_segments_from_index_task(segment_ids: list, dataset_id: str, documen
index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=False)

end_at = time.perf_counter()
-logging.info(click.style(f"Segments removed from index latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Segments removed from index latency: {end_at - start_at}", fg="green"))
except Exception:
# update segment error msg
db.session.query(DocumentSegment).where(

@@ -12,6 +12,8 @@ from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment
from models.source import DataSourceOauthBinding

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def document_indexing_sync_task(dataset_id: str, document_id: str):

@@ -22,13 +24,13 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):

Usage: document_indexing_sync_task.delay(dataset_id, document_id)
"""
-logging.info(click.style(f"Start sync document: {document_id}", fg="green"))
+logger.info(click.style(f"Start sync document: {document_id}", fg="green"))
start_at = time.perf_counter()

document = db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()

if not document:
-logging.info(click.style(f"Document not found: {document_id}", fg="red"))
+logger.info(click.style(f"Document not found: {document_id}", fg="red"))
db.session.close()
return

@@ -93,7 +95,7 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):
db.session.delete(segment)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
"Cleaned document when document update data source or process rule: {} latency: {}".format(
document_id, end_at - start_at

@@ -102,16 +104,16 @@ def document_indexing_sync_task(dataset_id: str, document_id: str):
)
)
except Exception:
-logging.exception("Cleaned document when document update data source or process rule failed")
+logger.exception("Cleaned document when document update data source or process rule failed")

try:
indexing_runner = IndexingRunner()
indexing_runner.run([document])
end_at = time.perf_counter()
-logging.info(click.style(f"update document: {document.id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"update document: {document.id} latency: {end_at - start_at}", fg="green"))
except DocumentIsPausedError as ex:
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
except Exception:
-logging.exception("document_indexing_sync_task failed, document_id: %s", document_id)
+logger.exception("document_indexing_sync_task failed, document_id: %s", document_id)
finally:
db.session.close()

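Note that the rewritten exception handlers keep lazy %-style arguments rather than f-strings. A small self-contained sketch of the difference follows; the RuntimeError and the document id value are made up purely for illustration.

```python
import logging

logger = logging.getLogger(__name__)
document_id = "doc-123"  # hypothetical value, for illustration only

try:
    raise RuntimeError("boom")
except Exception:
    # Lazy %-style formatting: the value is attached to the LogRecord as an argument
    # and interpolated only when the record is emitted; logger.exception also
    # appends the active traceback automatically.
    logger.exception("document_indexing_sync_task failed, document_id: %s", document_id)

# An f-string, by contrast, is formatted eagerly before the logging call runs,
# which is fine for always-emitted progress messages but bakes the value into
# the message string instead of keeping it as structured data.
logger.info(f"Start sync document: {document_id}")
```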
@@ -11,6 +11,8 @@ from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document
from services.feature_service import FeatureService

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def document_indexing_task(dataset_id: str, document_ids: list):

@@ -26,7 +28,7 @@ def document_indexing_task(dataset_id: str, document_ids: list):

dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
if not dataset:
-logging.info(click.style(f"Dataset is not found: {dataset_id}", fg="yellow"))
+logger.info(click.style(f"Dataset is not found: {dataset_id}", fg="yellow"))
db.session.close()
return
# check document limit

@@ -60,7 +62,7 @@ def document_indexing_task(dataset_id: str, document_ids: list):
return

for document_id in document_ids:
-logging.info(click.style(f"Start process document: {document_id}", fg="green"))
+logger.info(click.style(f"Start process document: {document_id}", fg="green"))

document = (
db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()

@@ -77,10 +79,10 @@ def document_indexing_task(dataset_id: str, document_ids: list):
indexing_runner = IndexingRunner()
indexing_runner.run(documents)
end_at = time.perf_counter()
-logging.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
except DocumentIsPausedError as ex:
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
except Exception:
-logging.exception("Document indexing task failed, dataset_id: %s", dataset_id)
+logger.exception("Document indexing task failed, dataset_id: %s", dataset_id)
finally:
db.session.close()

@@ -10,6 +10,8 @@ from extensions.ext_database import db
from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def document_indexing_update_task(dataset_id: str, document_id: str):

@@ -20,13 +22,13 @@ def document_indexing_update_task(dataset_id: str, document_id: str):

Usage: document_indexing_update_task.delay(dataset_id, document_id)
"""
-logging.info(click.style(f"Start update document: {document_id}", fg="green"))
+logger.info(click.style(f"Start update document: {document_id}", fg="green"))
start_at = time.perf_counter()

document = db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()

if not document:
-logging.info(click.style(f"Document not found: {document_id}", fg="red"))
+logger.info(click.style(f"Document not found: {document_id}", fg="red"))
db.session.close()
return

@@ -54,7 +56,7 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
db.session.delete(segment)
db.session.commit()
end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
"Cleaned document when document update data source or process rule: {} latency: {}".format(
document_id, end_at - start_at

@@ -63,16 +65,16 @@ def document_indexing_update_task(dataset_id: str, document_id: str):
)
)
except Exception:
-logging.exception("Cleaned document when document update data source or process rule failed")
+logger.exception("Cleaned document when document update data source or process rule failed")

try:
indexing_runner = IndexingRunner()
indexing_runner.run([document])
end_at = time.perf_counter()
-logging.info(click.style(f"update document: {document.id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"update document: {document.id} latency: {end_at - start_at}", fg="green"))
except DocumentIsPausedError as ex:
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
except Exception:
-logging.exception("document_indexing_update_task failed, document_id: %s", document_id)
+logger.exception("document_indexing_update_task failed, document_id: %s", document_id)
finally:
db.session.close()

@@ -12,6 +12,8 @@ from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment
from services.feature_service import FeatureService

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def duplicate_document_indexing_task(dataset_id: str, document_ids: list):

@@ -27,7 +29,7 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):

dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
if dataset is None:
-logging.info(click.style(f"Dataset not found: {dataset_id}", fg="red"))
+logger.info(click.style(f"Dataset not found: {dataset_id}", fg="red"))
db.session.close()
return

@@ -63,7 +65,7 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
db.session.close()

for document_id in document_ids:
-logging.info(click.style(f"Start process document: {document_id}", fg="green"))
+logger.info(click.style(f"Start process document: {document_id}", fg="green"))

document = (
db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()

@@ -95,10 +97,10 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
indexing_runner = IndexingRunner()
indexing_runner.run(documents)
end_at = time.perf_counter()
-logging.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Processed dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
except DocumentIsPausedError as ex:
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
except Exception:
-logging.exception("duplicate_document_indexing_task failed, dataset_id: %s", dataset_id)
+logger.exception("duplicate_document_indexing_task failed, dataset_id: %s", dataset_id)
finally:
db.session.close()

@@ -12,6 +12,8 @@ from extensions.ext_redis import redis_client
from libs.datetime_utils import naive_utc_now
from models.dataset import DocumentSegment

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def enable_segment_to_index_task(segment_id: str):

@@ -21,17 +23,17 @@ def enable_segment_to_index_task(segment_id: str):

Usage: enable_segment_to_index_task.delay(segment_id)
"""
-logging.info(click.style(f"Start enable segment to index: {segment_id}", fg="green"))
+logger.info(click.style(f"Start enable segment to index: {segment_id}", fg="green"))
start_at = time.perf_counter()

segment = db.session.query(DocumentSegment).where(DocumentSegment.id == segment_id).first()
if not segment:
-logging.info(click.style(f"Segment not found: {segment_id}", fg="red"))
+logger.info(click.style(f"Segment not found: {segment_id}", fg="red"))
db.session.close()
return

if segment.status != "completed":
-logging.info(click.style(f"Segment is not completed, enable is not allowed: {segment_id}", fg="red"))
+logger.info(click.style(f"Segment is not completed, enable is not allowed: {segment_id}", fg="red"))
db.session.close()
return

@@ -51,17 +53,17 @@ def enable_segment_to_index_task(segment_id: str):
dataset = segment.dataset

if not dataset:
-logging.info(click.style(f"Segment {segment.id} has no dataset, pass.", fg="cyan"))
+logger.info(click.style(f"Segment {segment.id} has no dataset, pass.", fg="cyan"))
return

dataset_document = segment.document

if not dataset_document:
-logging.info(click.style(f"Segment {segment.id} has no document, pass.", fg="cyan"))
+logger.info(click.style(f"Segment {segment.id} has no document, pass.", fg="cyan"))
return

if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != "completed":
-logging.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan"))
+logger.info(click.style(f"Segment {segment.id} document status is invalid, pass.", fg="cyan"))
return

index_processor = IndexProcessorFactory(dataset_document.doc_form).init_index_processor()

@@ -85,9 +87,9 @@ def enable_segment_to_index_task(segment_id: str):
index_processor.load(dataset, [document])

end_at = time.perf_counter()
-logging.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Segment enabled to index: {segment.id} latency: {end_at - start_at}", fg="green"))
except Exception as e:
-logging.exception("enable segment to index failed")
+logger.exception("enable segment to index failed")
segment.enabled = False
segment.disabled_at = naive_utc_now()
segment.status = "error"

@@ -13,6 +13,8 @@ from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, DocumentSegment
from models.dataset import Document as DatasetDocument

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_id: str):

@@ -27,17 +29,17 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i
start_at = time.perf_counter()
dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
if not dataset:
-logging.info(click.style(f"Dataset {dataset_id} not found, pass.", fg="cyan"))
+logger.info(click.style(f"Dataset {dataset_id} not found, pass.", fg="cyan"))
return

dataset_document = db.session.query(DatasetDocument).where(DatasetDocument.id == document_id).first()

if not dataset_document:
-logging.info(click.style(f"Document {document_id} not found, pass.", fg="cyan"))
+logger.info(click.style(f"Document {document_id} not found, pass.", fg="cyan"))
db.session.close()
return
if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != "completed":
-logging.info(click.style(f"Document {document_id} status is invalid, pass.", fg="cyan"))
+logger.info(click.style(f"Document {document_id} status is invalid, pass.", fg="cyan"))
db.session.close()
return
# sync index processor

@@ -53,7 +55,7 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i
.all()
)
if not segments:
-logging.info(click.style(f"Segments not found: {segment_ids}", fg="cyan"))
+logger.info(click.style(f"Segments not found: {segment_ids}", fg="cyan"))
db.session.close()
return

@@ -91,9 +93,9 @@ def enable_segments_to_index_task(segment_ids: list, dataset_id: str, document_i
index_processor.load(dataset, documents)

end_at = time.perf_counter()
-logging.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Segments enabled to index latency: {end_at - start_at}", fg="green"))
except Exception as e:
-logging.exception("enable segments to index failed")
+logger.exception("enable segments to index failed")
# update segment error msg
db.session.query(DocumentSegment).where(
DocumentSegment.id.in_(segment_ids),

@@ -7,6 +7,8 @@ from celery import shared_task
from extensions.ext_mail import mail
from libs.email_i18n import EmailType, get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_deletion_success_task(to: str, language: str = "en-US") -> None:

@@ -20,7 +22,7 @@ def send_deletion_success_task(to: str, language: str = "en-US") -> None:
if not mail.is_inited():
return

-logging.info(click.style(f"Start send account deletion success email to {to}", fg="green"))
+logger.info(click.style(f"Start send account deletion success email to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -36,11 +38,11 @@ def send_deletion_success_task(to: str, language: str = "en-US") -> None:
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(f"Send account deletion success email to {to}: latency: {end_at - start_at}", fg="green")
)
except Exception:
-logging.exception("Send account deletion success email to %s failed", to)
+logger.exception("Send account deletion success email to %s failed", to)


@shared_task(queue="mail")

@@ -56,7 +58,7 @@ def send_account_deletion_verification_code(to: str, code: str, language: str =
if not mail.is_inited():
return

-logging.info(click.style(f"Start send account deletion verification code email to {to}", fg="green"))
+logger.info(click.style(f"Start send account deletion verification code email to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -72,7 +74,7 @@ def send_account_deletion_verification_code(to: str, code: str, language: str =
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
"Send account deletion verification code email to {} succeeded: latency: {}".format(
to, end_at - start_at

@@ -81,4 +83,4 @@ def send_account_deletion_verification_code(to: str, code: str, language: str =
)
)
except Exception:
-logging.exception("Send account deletion verification code email to %s failed", to)
+logger.exception("Send account deletion verification code email to %s failed", to)

@@ -7,6 +7,8 @@ from celery import shared_task
from extensions.ext_mail import mail
from libs.email_i18n import EmailType, get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_change_mail_task(language: str, to: str, code: str, phase: str) -> None:

@@ -22,7 +24,7 @@ def send_change_mail_task(language: str, to: str, code: str, phase: str) -> None
if not mail.is_inited():
return

-logging.info(click.style(f"Start change email mail to {to}", fg="green"))
+logger.info(click.style(f"Start change email mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -35,9 +37,9 @@ def send_change_mail_task(language: str, to: str, code: str, phase: str) -> None
)

end_at = time.perf_counter()
-logging.info(click.style(f"Send change email mail to {to} succeeded: latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Send change email mail to {to} succeeded: latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("Send change email mail to %s failed", to)
+logger.exception("Send change email mail to %s failed", to)


@shared_task(queue="mail")

@@ -52,7 +54,7 @@ def send_change_mail_completed_notification_task(language: str, to: str) -> None
if not mail.is_inited():
return

-logging.info(click.style(f"Start change email completed notify mail to {to}", fg="green"))
+logger.info(click.style(f"Start change email completed notify mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -68,11 +70,11 @@ def send_change_mail_completed_notification_task(language: str, to: str) -> None
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
f"Send change email completed mail to {to} succeeded: latency: {end_at - start_at}",
fg="green",
)
)
except Exception:
-logging.exception("Send change email completed mail to %s failed", to)
+logger.exception("Send change email completed mail to %s failed", to)

@@ -7,6 +7,8 @@ from celery import shared_task
from extensions.ext_mail import mail
from libs.email_i18n import EmailType, get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_email_code_login_mail_task(language: str, to: str, code: str) -> None:

@@ -21,7 +23,7 @@ def send_email_code_login_mail_task(language: str, to: str, code: str) -> None:
if not mail.is_inited():
return

-logging.info(click.style(f"Start email code login mail to {to}", fg="green"))
+logger.info(click.style(f"Start email code login mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -37,8 +39,8 @@ def send_email_code_login_mail_task(language: str, to: str, code: str) -> None:
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(f"Send email code login mail to {to} succeeded: latency: {end_at - start_at}", fg="green")
)
except Exception:
-logging.exception("Send email code login mail to %s failed", to)
+logger.exception("Send email code login mail to %s failed", to)

@@ -9,13 +9,15 @@ from flask import render_template_string
from extensions.ext_mail import mail
from libs.email_i18n import get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_inner_email_task(to: list[str], subject: str, body: str, substitutions: Mapping[str, str]):
if not mail.is_inited():
return

-logging.info(click.style(f"Start enterprise mail to {to} with subject {subject}", fg="green"))
+logger.info(click.style(f"Start enterprise mail to {to} with subject {subject}", fg="green"))
start_at = time.perf_counter()

try:

@@ -25,6 +27,6 @@ def send_inner_email_task(to: list[str], subject: str, body: str, substitutions:
email_service.send_raw_email(to=to, subject=subject, html_content=html_content)

end_at = time.perf_counter()
-logging.info(click.style(f"Send enterprise mail to {to} succeeded: latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Send enterprise mail to {to} succeeded: latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("Send enterprise mail to %s failed", to)
+logger.exception("Send enterprise mail to %s failed", to)

@@ -8,6 +8,8 @@ from configs import dify_config
from extensions.ext_mail import mail
from libs.email_i18n import EmailType, get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_invite_member_mail_task(language: str, to: str, token: str, inviter_name: str, workspace_name: str) -> None:

@@ -24,7 +26,7 @@ def send_invite_member_mail_task(language: str, to: str, token: str, inviter_nam
if not mail.is_inited():
return

-logging.info(click.style(f"Start send invite member mail to {to} in workspace {workspace_name}", fg="green"))
+logger.info(click.style(f"Start send invite member mail to {to} in workspace {workspace_name}", fg="green"))
start_at = time.perf_counter()

try:

@@ -43,8 +45,6 @@ def send_invite_member_mail_task(language: str, to: str, token: str, inviter_nam
)

end_at = time.perf_counter()
-logging.info(
-click.style(f"Send invite member mail to {to} succeeded: latency: {end_at - start_at}", fg="green")
-)
+logger.info(click.style(f"Send invite member mail to {to} succeeded: latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("Send invite member mail to %s failed", to)
+logger.exception("Send invite member mail to %s failed", to)

@@ -7,6 +7,8 @@ from celery import shared_task
from extensions.ext_mail import mail
from libs.email_i18n import EmailType, get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_owner_transfer_confirm_task(language: str, to: str, code: str, workspace: str) -> None:

@@ -22,7 +24,7 @@ def send_owner_transfer_confirm_task(language: str, to: str, code: str, workspac
if not mail.is_inited():
return

-logging.info(click.style(f"Start owner transfer confirm mail to {to}", fg="green"))
+logger.info(click.style(f"Start owner transfer confirm mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -39,14 +41,14 @@ def send_owner_transfer_confirm_task(language: str, to: str, code: str, workspac
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
f"Send owner transfer confirm mail to {to} succeeded: latency: {end_at - start_at}",
fg="green",
)
)
except Exception:
-logging.exception("owner transfer confirm email mail to %s failed", to)
+logger.exception("owner transfer confirm email mail to %s failed", to)


@shared_task(queue="mail")

@@ -63,7 +65,7 @@ def send_old_owner_transfer_notify_email_task(language: str, to: str, workspace:
if not mail.is_inited():
return

-logging.info(click.style(f"Start old owner transfer notify mail to {to}", fg="green"))
+logger.info(click.style(f"Start old owner transfer notify mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -80,14 +82,14 @@ def send_old_owner_transfer_notify_email_task(language: str, to: str, workspace:
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
f"Send old owner transfer notify mail to {to} succeeded: latency: {end_at - start_at}",
fg="green",
)
)
except Exception:
-logging.exception("old owner transfer notify email mail to %s failed", to)
+logger.exception("old owner transfer notify email mail to %s failed", to)


@shared_task(queue="mail")

@@ -103,7 +105,7 @@ def send_new_owner_transfer_notify_email_task(language: str, to: str, workspace:
if not mail.is_inited():
return

-logging.info(click.style(f"Start new owner transfer notify mail to {to}", fg="green"))
+logger.info(click.style(f"Start new owner transfer notify mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -119,11 +121,11 @@ def send_new_owner_transfer_notify_email_task(language: str, to: str, workspace:
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(
f"Send new owner transfer notify mail to {to} succeeded: latency: {end_at - start_at}",
fg="green",
)
)
except Exception:
-logging.exception("new owner transfer notify email mail to %s failed", to)
+logger.exception("new owner transfer notify email mail to %s failed", to)

@@ -7,6 +7,8 @@ from celery import shared_task
from extensions.ext_mail import mail
from libs.email_i18n import EmailType, get_email_i18n_service

+logger = logging.getLogger(__name__)


@shared_task(queue="mail")
def send_reset_password_mail_task(language: str, to: str, code: str) -> None:

@@ -21,7 +23,7 @@ def send_reset_password_mail_task(language: str, to: str, code: str) -> None:
if not mail.is_inited():
return

-logging.info(click.style(f"Start password reset mail to {to}", fg="green"))
+logger.info(click.style(f"Start password reset mail to {to}", fg="green"))
start_at = time.perf_counter()

try:

@@ -37,8 +39,8 @@ def send_reset_password_mail_task(language: str, to: str, code: str) -> None:
)

end_at = time.perf_counter()
-logging.info(
+logger.info(
click.style(f"Send password reset mail to {to} succeeded: latency: {end_at - start_at}", fg="green")
)
except Exception:
-logging.exception("Send password reset mail to %s failed", to)
+logger.exception("Send password reset mail to %s failed", to)

@@ -12,6 +12,8 @@ from extensions.ext_storage import storage
from models.model import Message
from models.workflow import WorkflowRun

+logger = logging.getLogger(__name__)


@shared_task(queue="ops_trace")
def process_trace_tasks(file_info):

@@ -43,11 +45,11 @@ def process_trace_tasks(file_info):
if trace_type:
trace_info = trace_type(**trace_info)
trace_instance.trace(trace_info)
-logging.info("Processing trace tasks success, app_id: %s", app_id)
+logger.info("Processing trace tasks success, app_id: %s", app_id)
except Exception as e:
-logging.info("error:\n\n\n%s\n\n\n\n", e)
+logger.info("error:\n\n\n%s\n\n\n\n", e)
failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}"
redis_client.incr(failed_key)
-logging.info("Processing trace tasks failed, app_id: %s", app_id)
+logger.info("Processing trace tasks failed, app_id: %s", app_id)
finally:
storage.delete(file_path)

@@ -8,6 +8,8 @@ from core.indexing_runner import DocumentIsPausedError, IndexingRunner
from extensions.ext_database import db
from models.dataset import Document

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def recover_document_indexing_task(dataset_id: str, document_id: str):

@@ -18,13 +20,13 @@ def recover_document_indexing_task(dataset_id: str, document_id: str):

Usage: recover_document_indexing_task.delay(dataset_id, document_id)
"""
-logging.info(click.style(f"Recover document: {document_id}", fg="green"))
+logger.info(click.style(f"Recover document: {document_id}", fg="green"))
start_at = time.perf_counter()

document = db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()

if not document:
-logging.info(click.style(f"Document not found: {document_id}", fg="red"))
+logger.info(click.style(f"Document not found: {document_id}", fg="red"))
db.session.close()
return

@@ -37,10 +39,10 @@ def recover_document_indexing_task(dataset_id: str, document_id: str):
elif document.indexing_status == "indexing":
indexing_runner.run_in_indexing_status(document)
end_at = time.perf_counter()
-logging.info(click.style(f"Processed document: {document.id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Processed document: {document.id} latency: {end_at - start_at}", fg="green"))
except DocumentIsPausedError as ex:
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
except Exception:
-logging.exception("recover_document_indexing_task failed, document_id: %s", document_id)
+logger.exception("recover_document_indexing_task failed, document_id: %s", document_id)
finally:
db.session.close()

@@ -40,10 +40,12 @@ from models.workflow import (
)
from repositories.factory import DifyAPIRepositoryFactory

+logger = logging.getLogger(__name__)


@shared_task(queue="app_deletion", bind=True, max_retries=3)
def remove_app_and_related_data_task(self, tenant_id: str, app_id: str):
-logging.info(click.style(f"Start deleting app and related data: {tenant_id}:{app_id}", fg="green"))
+logger.info(click.style(f"Start deleting app and related data: {tenant_id}:{app_id}", fg="green"))
start_at = time.perf_counter()
try:
# Delete related data

@@ -69,14 +71,12 @@ def remove_app_and_related_data_task(self, tenant_id: str, app_id: str):
_delete_draft_variables(app_id)

end_at = time.perf_counter()
-logging.info(click.style(f"App and related data deleted: {app_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"App and related data deleted: {app_id} latency: {end_at - start_at}", fg="green"))
except SQLAlchemyError as e:
-logging.exception(
-click.style(f"Database error occurred while deleting app {app_id} and related data", fg="red")
-)
+logger.exception(click.style(f"Database error occurred while deleting app {app_id} and related data", fg="red"))
raise self.retry(exc=e, countdown=60) # Retry after 60 seconds
except Exception as e:
-logging.exception(click.style(f"Error occurred while deleting app {app_id} and related data", fg="red"))
+logger.exception(click.style(f"Error occurred while deleting app {app_id} and related data", fg="red"))
raise self.retry(exc=e, countdown=60) # Retry after 60 seconds


@@ -215,7 +215,7 @@ def _delete_app_workflow_runs(tenant_id: str, app_id: str):
batch_size=1000,
)

-logging.info("Deleted %s workflow runs for app %s", deleted_count, app_id)
+logger.info("Deleted %s workflow runs for app %s", deleted_count, app_id)


def _delete_app_workflow_node_executions(tenant_id: str, app_id: str):

@@ -229,7 +229,7 @@ def _delete_app_workflow_node_executions(tenant_id: str, app_id: str):
batch_size=1000,
)

-logging.info("Deleted %s workflow node executions for app %s", deleted_count, app_id)
+logger.info("Deleted %s workflow node executions for app %s", deleted_count, app_id)


def _delete_app_workflow_app_logs(tenant_id: str, app_id: str):

@@ -266,7 +266,7 @@ def _delete_conversation_variables(*, app_id: str):
with db.engine.connect() as conn:
conn.execute(stmt)
conn.commit()
-logging.info(click.style(f"Deleted conversation variables for app {app_id}", fg="green"))
+logger.info(click.style(f"Deleted conversation variables for app {app_id}", fg="green"))


def _delete_app_messages(tenant_id: str, app_id: str):

@@ -389,9 +389,9 @@ def delete_draft_variables_batch(app_id: str, batch_size: int = 1000) -> int:
batch_deleted = deleted_result.rowcount
total_deleted += batch_deleted

-logging.info(click.style(f"Deleted {batch_deleted} draft variables (batch) for app {app_id}", fg="green"))
+logger.info(click.style(f"Deleted {batch_deleted} draft variables (batch) for app {app_id}", fg="green"))

-logging.info(click.style(f"Deleted {total_deleted} total draft variables for app {app_id}", fg="green"))
+logger.info(click.style(f"Deleted {total_deleted} total draft variables for app {app_id}", fg="green"))
return total_deleted


@@ -407,8 +407,8 @@ def _delete_records(query_sql: str, params: dict, delete_func: Callable, name: s
try:
delete_func(record_id)
db.session.commit()
-logging.info(click.style(f"Deleted {name} {record_id}", fg="green"))
+logger.info(click.style(f"Deleted {name} {record_id}", fg="green"))
except Exception:
-logging.exception("Error occurred while deleting %s %s", name, record_id)
+logger.exception("Error occurred while deleting %s %s", name, record_id)
continue
rs.close()

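Besides the logger swap, the app-deletion task above is worth a note: it is declared with bind=True and max_retries=3 and re-queues itself with self.retry when a database error is logged. The following is a hedged sketch of that Celery retry pattern, with a hypothetical task name and a placeholder body (only the decorator arguments and the retry call mirror the diff):

```python
import logging

from celery import shared_task

logger = logging.getLogger(__name__)


@shared_task(queue="app_deletion", bind=True, max_retries=3)
def cleanup_app_data(self, app_id: str):  # hypothetical task, for illustration only
    # bind=True passes the task instance as `self`, which exposes self.retry().
    try:
        ...  # delete the app's related rows here
    except Exception as e:
        logger.exception("Error occurred while cleaning up app %s", app_id)
        # self.retry raises a Retry exception, so the task is re-queued and runs
        # again after 60 seconds, up to max_retries attempts before failing for good.
        raise self.retry(exc=e, countdown=60)
```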
@@ -10,6 +10,8 @@ from extensions.ext_redis import redis_client
from libs.datetime_utils import naive_utc_now
from models.dataset import Document, DocumentSegment

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def remove_document_from_index_task(document_id: str):

@@ -19,17 +21,17 @@ def remove_document_from_index_task(document_id: str):

Usage: remove_document_from_index.delay(document_id)
"""
-logging.info(click.style(f"Start remove document segments from index: {document_id}", fg="green"))
+logger.info(click.style(f"Start remove document segments from index: {document_id}", fg="green"))
start_at = time.perf_counter()

document = db.session.query(Document).where(Document.id == document_id).first()
if not document:
-logging.info(click.style(f"Document not found: {document_id}", fg="red"))
+logger.info(click.style(f"Document not found: {document_id}", fg="red"))
db.session.close()
return

if document.indexing_status != "completed":
-logging.info(click.style(f"Document is not completed, remove is not allowed: {document_id}", fg="red"))
+logger.info(click.style(f"Document is not completed, remove is not allowed: {document_id}", fg="red"))
db.session.close()
return

@@ -49,7 +51,7 @@ def remove_document_from_index_task(document_id: str):
try:
index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=False)
except Exception:
-logging.exception("clean dataset %s from index failed", dataset.id)
+logger.exception("clean dataset %s from index failed", dataset.id)
# update segment to disable
db.session.query(DocumentSegment).where(DocumentSegment.document_id == document.id).update(
{

@@ -62,11 +64,9 @@ def remove_document_from_index_task(document_id: str):
db.session.commit()

end_at = time.perf_counter()
-logging.info(
-click.style(f"Document removed from index: {document.id} latency: {end_at - start_at}", fg="green")
-)
+logger.info(click.style(f"Document removed from index: {document.id} latency: {end_at - start_at}", fg="green"))
except Exception:
-logging.exception("remove document from index failed")
+logger.exception("remove document from index failed")
if not document.archived:
document.enabled = True
db.session.commit()

@@ -12,6 +12,8 @@ from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment
from services.feature_service import FeatureService

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):

@@ -27,7 +29,7 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):
try:
dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
if not dataset:
-logging.info(click.style(f"Dataset not found: {dataset_id}", fg="red"))
+logger.info(click.style(f"Dataset not found: {dataset_id}", fg="red"))
return
tenant_id = dataset.tenant_id
for document_id in document_ids:

@@ -57,12 +59,12 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):
redis_client.delete(retry_indexing_cache_key)
return

-logging.info(click.style(f"Start retry document: {document_id}", fg="green"))
+logger.info(click.style(f"Start retry document: {document_id}", fg="green"))
document = (
db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()
)
if not document:
-logging.info(click.style(f"Document not found: {document_id}", fg="yellow"))
+logger.info(click.style(f"Document not found: {document_id}", fg="yellow"))
return
try:
# clean old data

@@ -92,13 +94,13 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):
document.stopped_at = naive_utc_now()
db.session.add(document)
db.session.commit()
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
redis_client.delete(retry_indexing_cache_key)
-logging.exception("retry_document_indexing_task failed, document_id: %s", document_id)
+logger.exception("retry_document_indexing_task failed, document_id: %s", document_id)
end_at = time.perf_counter()
-logging.info(click.style(f"Retry dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Retry dataset: {dataset_id} latency: {end_at - start_at}", fg="green"))
except Exception as e:
-logging.exception(
+logger.exception(
"retry_document_indexing_task failed, dataset_id: %s, document_ids: %s", dataset_id, document_ids
)
raise e

@@ -12,6 +12,8 @@ from libs.datetime_utils import naive_utc_now
from models.dataset import Dataset, Document, DocumentSegment
from services.feature_service import FeatureService

+logger = logging.getLogger(__name__)


@shared_task(queue="dataset")
def sync_website_document_indexing_task(dataset_id: str, document_id: str):

@@ -52,10 +54,10 @@ def sync_website_document_indexing_task(dataset_id: str, document_id: str):
redis_client.delete(sync_indexing_cache_key)
return

-logging.info(click.style(f"Start sync website document: {document_id}", fg="green"))
+logger.info(click.style(f"Start sync website document: {document_id}", fg="green"))
document = db.session.query(Document).where(Document.id == document_id, Document.dataset_id == dataset_id).first()
if not document:
-logging.info(click.style(f"Document not found: {document_id}", fg="yellow"))
+logger.info(click.style(f"Document not found: {document_id}", fg="yellow"))
return
try:
# clean old data

@@ -85,8 +87,8 @@ def sync_website_document_indexing_task(dataset_id: str, document_id: str):
document.stopped_at = naive_utc_now()
db.session.add(document)
db.session.commit()
-logging.info(click.style(str(ex), fg="yellow"))
+logger.info(click.style(str(ex), fg="yellow"))
redis_client.delete(sync_indexing_cache_key)
-logging.exception("sync_website_document_indexing_task failed, document_id: %s", document_id)
+logger.exception("sync_website_document_indexing_task failed, document_id: %s", document_id)
end_at = time.perf_counter()
-logging.info(click.style(f"Sync document: {document_id} latency: {end_at - start_at}", fg="green"))
+logger.info(click.style(f"Sync document: {document_id} latency: {end_at - start_at}", fg="green"))

@@ -59,7 +59,7 @@ def mock_response_receiver(monkeypatch) -> mock.Mock:
@pytest.fixture
def mock_logger(monkeypatch) -> logging.Logger:
_logger = mock.MagicMock(spec=logging.Logger)
-monkeypatch.setattr(ext_request_logging, "_logger", _logger)
+monkeypatch.setattr(ext_request_logging, "logger", _logger)
return _logger

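The fixture change above simply tracks the rename of the patched attribute from _logger to logger. For context, a sketch of how such a fixture is typically used in a test; the import path matches the fixture, but the test function and the call it asserts on are illustrative assumptions rather than part of the diff:

```python
import logging
from unittest import mock

import pytest

from extensions import ext_request_logging  # module whose logger is patched above


@pytest.fixture
def mock_logger(monkeypatch) -> logging.Logger:
    # Swap the module-level `logger` for a MagicMock so tests can assert on
    # logging calls without emitting real log records.
    _logger = mock.MagicMock(spec=logging.Logger)
    monkeypatch.setattr(ext_request_logging, "logger", _logger)
    return _logger


def test_module_logger_is_patched(mock_logger):
    # Stands in for exercising a code path in ext_request_logging that logs.
    ext_request_logging.logger.info("request received")
    mock_logger.info.assert_called_once_with("request received")
```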
Some files were not shown because too many files have changed in this diff.