feat: enhance tencent trace integration with LLM core metrics (#27126)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com>
XlKsyt 2025-10-29 15:53:30 +08:00 committed by GitHub
parent 82890fe38e
commit 1e9142c213
13 changed files with 609 additions and 57 deletions

View File

@@ -1,3 +1,4 @@
import json
import logging
import re
import time
@@ -60,6 +61,7 @@ from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTas
from core.app.task_pipeline.message_cycle_manager import MessageCycleManager
from core.base.tts import AppGeneratorTTSPublisher, AudioTrunk
from core.model_runtime.entities.llm_entities import LLMUsage
from core.model_runtime.utils.encoders import jsonable_encoder
from core.ops.ops_trace_manager import TraceQueueManager
from core.workflow.enums import WorkflowExecutionStatus
from core.workflow.nodes import NodeType
@@ -391,6 +393,14 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
if should_direct_answer:
return
current_time = time.perf_counter()
if self._task_state.first_token_time is None and delta_text.strip():
self._task_state.first_token_time = current_time
self._task_state.is_streaming_response = True
if delta_text.strip():
self._task_state.last_token_time = current_time
# Only publish tts message at text chunk streaming
if tts_publisher and queue_message:
tts_publisher.publish(queue_message)
@@ -772,7 +782,33 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
message.answer = answer_text
message.updated_at = naive_utc_now()
message.provider_response_latency = time.perf_counter() - self._base_task_pipeline.start_at
message.message_metadata = self._task_state.metadata.model_dump_json()
# Set usage first before dumping metadata
if graph_runtime_state and graph_runtime_state.llm_usage:
usage = graph_runtime_state.llm_usage
message.message_tokens = usage.prompt_tokens
message.message_unit_price = usage.prompt_unit_price
message.message_price_unit = usage.prompt_price_unit
message.answer_tokens = usage.completion_tokens
message.answer_unit_price = usage.completion_unit_price
message.answer_price_unit = usage.completion_price_unit
message.total_price = usage.total_price
message.currency = usage.currency
self._task_state.metadata.usage = usage
else:
usage = LLMUsage.empty_usage()
self._task_state.metadata.usage = usage
# Add streaming metrics to usage if available
if self._task_state.is_streaming_response and self._task_state.first_token_time:
start_time = self._base_task_pipeline.start_at
first_token_time = self._task_state.first_token_time
last_token_time = self._task_state.last_token_time or first_token_time
usage.time_to_first_token = round(first_token_time - start_time, 3)
usage.time_to_generate = round(last_token_time - first_token_time, 3)
metadata = self._task_state.metadata.model_dump()
message.message_metadata = json.dumps(jsonable_encoder(metadata))
message_files = [
MessageFile(
message_id=message.id,
@@ -790,20 +826,6 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
]
session.add_all(message_files)
if graph_runtime_state and graph_runtime_state.llm_usage:
usage = graph_runtime_state.llm_usage
message.message_tokens = usage.prompt_tokens
message.message_unit_price = usage.prompt_unit_price
message.message_price_unit = usage.prompt_price_unit
message.answer_tokens = usage.completion_tokens
message.answer_unit_price = usage.completion_unit_price
message.answer_price_unit = usage.completion_price_unit
message.total_price = usage.total_price
message.currency = usage.currency
self._task_state.metadata.usage = usage
else:
self._task_state.metadata.usage = LLMUsage.empty_usage()
def _seed_graph_runtime_state_from_queue_manager(self) -> None:
"""Bootstrap the cached runtime state from the queue manager when present."""
candidate = self._base_task_pipeline.queue_manager.graph_runtime_state

View File

@@ -48,6 +48,9 @@ class WorkflowTaskState(TaskState):
"""
answer: str = ""
first_token_time: float | None = None
last_token_time: float | None = None
is_streaming_response: bool = False
class StreamEvent(StrEnum):

View File

@@ -38,6 +38,8 @@ class LLMUsageMetadata(TypedDict, total=False):
prompt_price: Union[float, str]
completion_price: Union[float, str]
latency: float
time_to_first_token: float
time_to_generate: float
class LLMUsage(ModelUsage):
@@ -57,6 +59,8 @@ class LLMUsage(ModelUsage):
total_price: Decimal
currency: str
latency: float
time_to_first_token: float | None = None
time_to_generate: float | None = None
@classmethod
def empty_usage(cls):
@@ -73,6 +77,8 @@ class LLMUsage(ModelUsage):
total_price=Decimal("0.0"),
currency="USD",
latency=0.0,
time_to_first_token=None,
time_to_generate=None,
)
@classmethod
@@ -108,6 +114,8 @@ class LLMUsage(ModelUsage):
prompt_price=Decimal(str(metadata.get("prompt_price", 0))),
completion_price=Decimal(str(metadata.get("completion_price", 0))),
latency=metadata.get("latency", 0.0),
time_to_first_token=metadata.get("time_to_first_token"),
time_to_generate=metadata.get("time_to_generate"),
)
def plus(self, other: LLMUsage) -> LLMUsage:
@@ -133,6 +141,8 @@ class LLMUsage(ModelUsage):
total_price=self.total_price + other.total_price,
currency=other.currency,
latency=self.latency + other.latency,
time_to_first_token=other.time_to_first_token,
time_to_generate=other.time_to_generate,
)
def __add__(self, other: LLMUsage) -> LLMUsage:
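
A minimal sketch (not the project code) of how the new streaming fields behave under `plus()`-style accumulation: token counters and latency are summed, while `time_to_first_token` and `time_to_generate` are carried over from `other` (the most recent chunk's usage), mirroring the change above. `Usage` is a hypothetical stand-in for `LLMUsage`, reduced to the relevant fields.

```python
from dataclasses import dataclass


@dataclass
class Usage:
    prompt_tokens: int = 0
    completion_tokens: int = 0
    latency: float = 0.0
    time_to_first_token: float | None = None
    time_to_generate: float | None = None

    def plus(self, other: "Usage") -> "Usage":
        # Counters and latency accumulate; streaming metrics come from the latest chunk.
        return Usage(
            prompt_tokens=self.prompt_tokens + other.prompt_tokens,
            completion_tokens=self.completion_tokens + other.completion_tokens,
            latency=self.latency + other.latency,
            time_to_first_token=other.time_to_first_token,
            time_to_generate=other.time_to_generate,
        )


total = Usage().plus(
    Usage(prompt_tokens=12, completion_tokens=48, latency=1.8, time_to_first_token=0.35, time_to_generate=1.45)
)
print(total.time_to_first_token)  # 0.35 -- carried over, not summed
```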

View File

@@ -62,6 +62,9 @@ class MessageTraceInfo(BaseTraceInfo):
file_list: Union[str, dict[str, Any], list] | None = None
message_file_data: Any | None = None
conversation_mode: str
gen_ai_server_time_to_first_token: float | None = None
llm_streaming_time_to_generate: float | None = None
is_streaming_request: bool = False
class ModerationTraceInfo(BaseTraceInfo):

View File

@@ -619,6 +619,8 @@ class TraceTask:
file_url = f"{self.file_base_url}/{message_file_data.url}" if message_file_data else ""
file_list.append(file_url)
streaming_metrics = self._extract_streaming_metrics(message_data)
metadata = {
"conversation_id": message_data.conversation_id,
"ls_provider": message_data.model_provider,
@@ -651,6 +653,9 @@ class TraceTask:
metadata=metadata,
message_file_data=message_file_data,
conversation_mode=conversation_mode,
gen_ai_server_time_to_first_token=streaming_metrics.get("gen_ai_server_time_to_first_token"),
llm_streaming_time_to_generate=streaming_metrics.get("llm_streaming_time_to_generate"),
is_streaming_request=streaming_metrics.get("is_streaming_request", False),
)
return message_trace_info
@@ -876,6 +881,24 @@ class TraceTask:
return generate_name_trace_info
def _extract_streaming_metrics(self, message_data) -> dict:
if not message_data.message_metadata:
return {}
try:
metadata = json.loads(message_data.message_metadata)
usage = metadata.get("usage", {})
time_to_first_token = usage.get("time_to_first_token")
time_to_generate = usage.get("time_to_generate")
return {
"gen_ai_server_time_to_first_token": time_to_first_token,
"llm_streaming_time_to_generate": time_to_generate,
"is_streaming_request": time_to_first_token is not None,
}
except (json.JSONDecodeError, AttributeError):
return {}
trace_manager_timer: threading.Timer | None = None
trace_manager_queue: queue.Queue = queue.Queue()
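
For reference, a hedged sketch of the payload `_extract_streaming_metrics` expects: `message_metadata` is the JSON the task pipeline dumps above, with the streaming metrics nested under `usage`. The numeric values here are illustrative only.

```python
import json

# Hypothetical message_metadata, shaped like the JSON dumped by the task pipeline above.
message_metadata = json.dumps(
    {
        "usage": {
            "prompt_tokens": 12,
            "completion_tokens": 48,
            "time_to_first_token": 0.35,  # seconds; only present for streaming responses
            "time_to_generate": 1.45,
        }
    }
)

usage = json.loads(message_metadata).get("usage", {})
ttft = usage.get("time_to_first_token")
print(
    {
        "gen_ai_server_time_to_first_token": ttft,
        "llm_streaming_time_to_generate": usage.get("time_to_generate"),
        "is_streaming_request": ttft is not None,
    }
)
```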

View File

@@ -11,6 +11,11 @@ import socket
from typing import TYPE_CHECKING
from urllib.parse import urlparse
try:
from importlib.metadata import version
except ImportError:
from importlib_metadata import version # type: ignore[import-not-found]
if TYPE_CHECKING:
from opentelemetry.metrics import Meter
from opentelemetry.metrics._internal.instrument import Histogram
@@ -27,12 +32,27 @@ from opentelemetry.util.types import AttributeValue
from configs import dify_config
from .entities.tencent_semconv import LLM_OPERATION_DURATION
from .entities.semconv import (
GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
GEN_AI_STREAMING_TIME_TO_GENERATE,
GEN_AI_TOKEN_USAGE,
GEN_AI_TRACE_DURATION,
LLM_OPERATION_DURATION,
)
from .entities.tencent_trace_entity import SpanData
logger = logging.getLogger(__name__)
def _get_opentelemetry_sdk_version() -> str:
"""Get OpenTelemetry SDK version dynamically."""
try:
return version("opentelemetry-sdk")
except Exception:
logger.debug("Failed to get opentelemetry-sdk version, using default")
return "1.27.0" # fallback version
class TencentTraceClient:
"""Tencent APM trace client using OpenTelemetry OTLP exporter"""
@@ -57,6 +77,9 @@ class TencentTraceClient:
ResourceAttributes.SERVICE_VERSION: f"dify-{dify_config.project.version}-{dify_config.COMMIT_SHA}",
ResourceAttributes.DEPLOYMENT_ENVIRONMENT: f"{dify_config.DEPLOY_ENV}-{dify_config.EDITION}",
ResourceAttributes.HOST_NAME: socket.gethostname(),
ResourceAttributes.TELEMETRY_SDK_LANGUAGE: "python",
ResourceAttributes.TELEMETRY_SDK_NAME: "opentelemetry",
ResourceAttributes.TELEMETRY_SDK_VERSION: _get_opentelemetry_sdk_version(),
}
)
# Prepare gRPC endpoint/metadata
@@ -80,13 +103,18 @@ class TencentTraceClient:
)
self.tracer_provider.add_span_processor(self.span_processor)
self.tracer = self.tracer_provider.get_tracer("dify.tencent_apm")
# use dify api version as tracer version
self.tracer = self.tracer_provider.get_tracer("dify-sdk", dify_config.project.version)
# Store span contexts for parent-child relationships
self.span_contexts: dict[int, trace_api.SpanContext] = {}
self.meter: Meter | None = None
self.hist_llm_duration: Histogram | None = None
self.hist_token_usage: Histogram | None = None
self.hist_time_to_first_token: Histogram | None = None
self.hist_time_to_generate: Histogram | None = None
self.hist_trace_duration: Histogram | None = None
self.metric_reader: MetricReader | None = None
# Metrics exporter and instruments
@@ -99,7 +127,7 @@ class TencentTraceClient:
use_http_protobuf = protocol in {"http/protobuf", "http-protobuf"}
use_http_json = protocol in {"http/json", "http-json"}
# Set preferred temporality for histograms to DELTA
# Tencent APM works best with delta aggregation temporality
preferred_temporality: dict[type, AggregationTemporality] = {Histogram: AggregationTemporality.DELTA}
def _create_metric_exporter(exporter_cls, **kwargs):
@@ -177,20 +205,59 @@ class TencentTraceClient:
provider = MeterProvider(resource=self.resource, metric_readers=[metric_reader])
metrics.set_meter_provider(provider)
self.meter = metrics.get_meter("dify-sdk", dify_config.project.version)
# LLM operation duration histogram
self.hist_llm_duration = self.meter.create_histogram(
name=LLM_OPERATION_DURATION,
unit="s",
description="LLM operation duration (seconds)",
)
# Token usage histogram with exponential buckets
self.hist_token_usage = self.meter.create_histogram(
name=GEN_AI_TOKEN_USAGE,
unit="token",
description="Number of tokens used in prompt and completions",
)
# Time to first token histogram
self.hist_time_to_first_token = self.meter.create_histogram(
name=GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
unit="s",
description="Time to first token for streaming LLM responses (seconds)",
)
# Time to generate histogram
self.hist_time_to_generate = self.meter.create_histogram(
name=GEN_AI_STREAMING_TIME_TO_GENERATE,
unit="s",
description="Total time to generate streaming LLM responses (seconds)",
)
# Trace duration histogram
self.hist_trace_duration = self.meter.create_histogram(
name=GEN_AI_TRACE_DURATION,
unit="s",
description="End-to-end GenAI trace duration (seconds)",
)
self.metric_reader = metric_reader
else:
self.meter = None
self.hist_llm_duration = None
self.hist_token_usage = None
self.hist_time_to_first_token = None
self.hist_time_to_generate = None
self.hist_trace_duration = None
self.metric_reader = None
except Exception:
logger.exception("[Tencent APM] Metrics initialization failed; metrics disabled")
self.meter = None
self.hist_llm_duration = None
self.hist_token_usage = None
self.hist_time_to_first_token = None
self.hist_time_to_generate = None
self.hist_trace_duration = None
self.metric_reader = None
def add_span(self, span_data: SpanData) -> None:
@@ -216,6 +283,117 @@ class TencentTraceClient:
except Exception:
logger.debug("[Tencent APM] Failed to record LLM duration", exc_info=True)
def record_token_usage(
self,
token_count: int,
token_type: str,
operation_name: str,
request_model: str,
response_model: str,
server_address: str,
provider: str,
) -> None:
"""Record token usage histogram.
Args:
token_count: Number of tokens used
token_type: "input" or "output"
operation_name: Operation name (e.g., "chat")
request_model: Model used in request
response_model: Model used in response
server_address: Server address
provider: Model provider name
"""
try:
if not hasattr(self, "hist_token_usage") or self.hist_token_usage is None:
return
attributes = {
"gen_ai.operation.name": operation_name,
"gen_ai.request.model": request_model,
"gen_ai.response.model": response_model,
"gen_ai.system": provider,
"gen_ai.token.type": token_type,
"server.address": server_address,
}
self.hist_token_usage.record(token_count, attributes) # type: ignore[attr-defined]
except Exception:
logger.debug("[Tencent APM] Failed to record token usage", exc_info=True)
def record_time_to_first_token(
self, ttft_seconds: float, provider: str, model: str, operation_name: str = "chat"
) -> None:
"""Record time to first token histogram.
Args:
ttft_seconds: Time to first token in seconds
provider: Model provider name
model: Model name
operation_name: Operation name (default: "chat")
"""
try:
if not hasattr(self, "hist_time_to_first_token") or self.hist_time_to_first_token is None:
return
attributes = {
"gen_ai.operation.name": operation_name,
"gen_ai.system": provider,
"gen_ai.request.model": model,
"gen_ai.response.model": model,
"stream": "true",
}
self.hist_time_to_first_token.record(ttft_seconds, attributes) # type: ignore[attr-defined]
except Exception:
logger.debug("[Tencent APM] Failed to record time to first token", exc_info=True)
def record_time_to_generate(
self, ttg_seconds: float, provider: str, model: str, operation_name: str = "chat"
) -> None:
"""Record time to generate histogram.
Args:
ttg_seconds: Time to generate in seconds
provider: Model provider name
model: Model name
operation_name: Operation name (default: "chat")
"""
try:
if not hasattr(self, "hist_time_to_generate") or self.hist_time_to_generate is None:
return
attributes = {
"gen_ai.operation.name": operation_name,
"gen_ai.system": provider,
"gen_ai.request.model": model,
"gen_ai.response.model": model,
"stream": "true",
}
self.hist_time_to_generate.record(ttg_seconds, attributes) # type: ignore[attr-defined]
except Exception:
logger.debug("[Tencent APM] Failed to record time to generate", exc_info=True)
def record_trace_duration(self, duration_seconds: float, attributes: dict[str, str] | None = None) -> None:
"""Record end-to-end trace duration histogram in seconds.
Args:
duration_seconds: Trace duration in seconds
attributes: Optional attributes (e.g., conversation_mode, app_id)
"""
try:
if not hasattr(self, "hist_trace_duration") or self.hist_trace_duration is None:
return
attrs: dict[str, str] = {}
if attributes:
for k, v in attributes.items():
attrs[k] = str(v) if not isinstance(v, (str, int, float, bool)) else v # type: ignore[assignment]
self.hist_trace_duration.record(duration_seconds, attrs) # type: ignore[attr-defined]
except Exception:
logger.debug("[Tencent APM] Failed to record trace duration", exc_info=True)
def _create_and_export_span(self, span_data: SpanData) -> None:
"""Create span using OpenTelemetry Tracer API"""
try:
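
As a rough illustration of what the new recorder methods do internally, the sketch below records one time-to-first-token sample through the plain OpenTelemetry metrics API, using the same instrument name and attribute keys as above. It is not the client itself; the provider and model values are made up, and without a configured `MeterProvider` the call is a no-op.

```python
from opentelemetry import metrics

GEN_AI_SERVER_TIME_TO_FIRST_TOKEN = "gen_ai.server.time_to_first_token"

meter = metrics.get_meter("dify-sdk")  # no-op meter unless a MeterProvider is configured
ttft_histogram = meter.create_histogram(
    name=GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
    unit="s",
    description="Time to first token for streaming LLM responses (seconds)",
)
ttft_histogram.record(
    0.35,
    {
        "gen_ai.operation.name": "chat",
        "gen_ai.system": "openai",  # hypothetical provider
        "gen_ai.request.model": "gpt-4o",  # hypothetical model
        "gen_ai.response.model": "gpt-4o",
        "stream": "true",
    },
)
```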

View File

@@ -47,6 +47,9 @@ GEN_AI_COMPLETION = "gen_ai.completion"
GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reason"
# Streaming Span Attributes
GEN_AI_IS_STREAMING_REQUEST = "llm.is_streaming" # Same as OpenLLMetry semconv
# Tool
TOOL_NAME = "tool.name"
@@ -62,6 +65,19 @@ INSTRUMENTATION_LANGUAGE = "python"
# Metrics
LLM_OPERATION_DURATION = "gen_ai.client.operation.duration"
GEN_AI_TOKEN_USAGE = "gen_ai.client.token.usage"
GEN_AI_SERVER_TIME_TO_FIRST_TOKEN = "gen_ai.server.time_to_first_token"
GEN_AI_STREAMING_TIME_TO_GENERATE = "gen_ai.streaming.time_to_generate"
# The LLM trace duration which is exclusive to tencent apm
GEN_AI_TRACE_DURATION = "gen_ai.trace.duration"
# Token Usage Attributes
GEN_AI_OPERATION_NAME = "gen_ai.operation.name"
GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
GEN_AI_SYSTEM = "gen_ai.system"
GEN_AI_TOKEN_TYPE = "gen_ai.token.type"
SERVER_ADDRESS = "server.address"
class GenAISpanKind(Enum):

View File

@@ -14,10 +14,11 @@ from core.ops.entities.trace_entity import (
ToolTraceInfo,
WorkflowTraceInfo,
)
from core.ops.tencent_trace.entities.tencent_semconv import (
from core.ops.tencent_trace.entities.semconv import (
GEN_AI_COMPLETION,
GEN_AI_FRAMEWORK,
GEN_AI_IS_ENTRY,
GEN_AI_IS_STREAMING_REQUEST,
GEN_AI_MODEL_NAME,
GEN_AI_PROMPT,
GEN_AI_PROVIDER,
@@ -156,6 +157,25 @@ class TencentSpanBuilder:
outputs = node_execution.outputs or {}
usage_data = process_data.get("usage", {}) if "usage" in process_data else outputs.get("usage", {})
attributes = {
GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id", ""),
GEN_AI_SPAN_KIND: GenAISpanKind.GENERATION.value,
GEN_AI_FRAMEWORK: "dify",
GEN_AI_MODEL_NAME: process_data.get("model_name", ""),
GEN_AI_PROVIDER: process_data.get("model_provider", ""),
GEN_AI_USAGE_INPUT_TOKENS: str(usage_data.get("prompt_tokens", 0)),
GEN_AI_USAGE_OUTPUT_TOKENS: str(usage_data.get("completion_tokens", 0)),
GEN_AI_USAGE_TOTAL_TOKENS: str(usage_data.get("total_tokens", 0)),
GEN_AI_PROMPT: json.dumps(process_data.get("prompts", []), ensure_ascii=False),
GEN_AI_COMPLETION: str(outputs.get("text", "")),
GEN_AI_RESPONSE_FINISH_REASON: outputs.get("finish_reason", ""),
INPUT_VALUE: json.dumps(process_data.get("prompts", []), ensure_ascii=False),
OUTPUT_VALUE: str(outputs.get("text", "")),
}
if usage_data.get("time_to_first_token") is not None:
attributes[GEN_AI_IS_STREAMING_REQUEST] = "true"
return SpanData(
trace_id=trace_id,
parent_span_id=workflow_span_id,
@@ -163,21 +183,7 @@ class TencentSpanBuilder:
name="GENERATION",
start_time=TencentSpanBuilder._get_time_nanoseconds(node_execution.created_at),
end_time=TencentSpanBuilder._get_time_nanoseconds(node_execution.finished_at),
attributes={
GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id", ""),
GEN_AI_SPAN_KIND: GenAISpanKind.GENERATION.value,
GEN_AI_FRAMEWORK: "dify",
GEN_AI_MODEL_NAME: process_data.get("model_name", ""),
GEN_AI_PROVIDER: process_data.get("model_provider", ""),
GEN_AI_USAGE_INPUT_TOKENS: str(usage_data.get("prompt_tokens", 0)),
GEN_AI_USAGE_OUTPUT_TOKENS: str(usage_data.get("completion_tokens", 0)),
GEN_AI_USAGE_TOTAL_TOKENS: str(usage_data.get("total_tokens", 0)),
GEN_AI_PROMPT: json.dumps(process_data.get("prompts", []), ensure_ascii=False),
GEN_AI_COMPLETION: str(outputs.get("text", "")),
GEN_AI_RESPONSE_FINISH_REASON: outputs.get("finish_reason", ""),
INPUT_VALUE: json.dumps(process_data.get("prompts", []), ensure_ascii=False),
OUTPUT_VALUE: str(outputs.get("text", "")),
},
attributes=attributes,
status=TencentSpanBuilder._get_workflow_node_status(node_execution),
)
@@ -191,6 +197,19 @@ class TencentSpanBuilder:
if trace_info.error:
status = Status(StatusCode.ERROR, trace_info.error)
attributes = {
GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id", ""),
GEN_AI_USER_ID: str(user_id),
GEN_AI_SPAN_KIND: GenAISpanKind.WORKFLOW.value,
GEN_AI_FRAMEWORK: "dify",
GEN_AI_IS_ENTRY: "true",
INPUT_VALUE: str(trace_info.inputs or ""),
OUTPUT_VALUE: str(trace_info.outputs or ""),
}
if trace_info.is_streaming_request:
attributes[GEN_AI_IS_STREAMING_REQUEST] = "true"
return SpanData(
trace_id=trace_id,
parent_span_id=None,
@@ -198,15 +217,7 @@ class TencentSpanBuilder:
name="message",
start_time=TencentSpanBuilder._get_time_nanoseconds(trace_info.start_time),
end_time=TencentSpanBuilder._get_time_nanoseconds(trace_info.end_time),
attributes={
GEN_AI_SESSION_ID: trace_info.metadata.get("conversation_id", ""),
GEN_AI_USER_ID: str(user_id),
GEN_AI_SPAN_KIND: GenAISpanKind.WORKFLOW.value,
GEN_AI_FRAMEWORK: "dify",
GEN_AI_IS_ENTRY: "true",
INPUT_VALUE: str(trace_info.inputs or ""),
OUTPUT_VALUE: str(trace_info.outputs or ""),
},
attributes=attributes,
status=status,
links=links,
)

View File

@@ -90,6 +90,9 @@ class TencentDataTrace(BaseTraceInstance):
self._process_workflow_nodes(trace_info, trace_id)
# Record trace duration for entry span
self._record_workflow_trace_duration(trace_info)
except Exception:
logger.exception("[Tencent APM] Failed to process workflow trace")
@@ -107,6 +110,11 @@ class TencentDataTrace(BaseTraceInstance):
self.trace_client.add_span(message_span)
self._record_message_llm_metrics(trace_info)
# Record trace duration for entry span
self._record_message_trace_duration(trace_info)
except Exception:
logger.exception("[Tencent APM] Failed to process message trace")
@@ -290,24 +298,219 @@ class TencentDataTrace(BaseTraceInstance):
def _record_llm_metrics(self, node_execution: WorkflowNodeExecution) -> None:
"""Record LLM performance metrics"""
try:
if not hasattr(self.trace_client, "record_llm_duration"):
return
process_data = node_execution.process_data or {}
usage = process_data.get("usage", {})
latency_s = float(usage.get("latency", 0.0))
if latency_s > 0:
attributes = {
"provider": process_data.get("model_provider", ""),
"model": process_data.get("model_name", ""),
"span_kind": "GENERATION",
}
self.trace_client.record_llm_duration(latency_s, attributes)
outputs = node_execution.outputs or {}
usage = process_data.get("usage", {}) if "usage" in process_data else outputs.get("usage", {})
model_provider = process_data.get("model_provider", "unknown")
model_name = process_data.get("model_name", "unknown")
model_mode = process_data.get("model_mode", "chat")
# Record LLM duration
if hasattr(self.trace_client, "record_llm_duration"):
latency_s = float(usage.get("latency", 0.0))
if latency_s > 0:
# Determine if streaming from usage metrics
is_streaming = usage.get("time_to_first_token") is not None
attributes = {
"gen_ai.system": model_provider,
"gen_ai.response.model": model_name,
"gen_ai.operation.name": model_mode,
"stream": "true" if is_streaming else "false",
}
self.trace_client.record_llm_duration(latency_s, attributes)
# Record streaming metrics from usage
time_to_first_token = usage.get("time_to_first_token")
if time_to_first_token is not None and hasattr(self.trace_client, "record_time_to_first_token"):
ttft_seconds = float(time_to_first_token)
if ttft_seconds > 0:
self.trace_client.record_time_to_first_token(
ttft_seconds=ttft_seconds, provider=model_provider, model=model_name, operation_name=model_mode
)
time_to_generate = usage.get("time_to_generate")
if time_to_generate is not None and hasattr(self.trace_client, "record_time_to_generate"):
ttg_seconds = float(time_to_generate)
if ttg_seconds > 0:
self.trace_client.record_time_to_generate(
ttg_seconds=ttg_seconds, provider=model_provider, model=model_name, operation_name=model_mode
)
# Record token usage
if hasattr(self.trace_client, "record_token_usage"):
# Extract token counts
input_tokens = int(usage.get("prompt_tokens", 0))
output_tokens = int(usage.get("completion_tokens", 0))
if input_tokens > 0 or output_tokens > 0:
server_address = f"{model_provider}"
# Record input tokens
if input_tokens > 0:
self.trace_client.record_token_usage(
token_count=input_tokens,
token_type="input",
operation_name=model_mode,
request_model=model_name,
response_model=model_name,
server_address=server_address,
provider=model_provider,
)
# Record output tokens
if output_tokens > 0:
self.trace_client.record_token_usage(
token_count=output_tokens,
token_type="output",
operation_name=model_mode,
request_model=model_name,
response_model=model_name,
server_address=server_address,
provider=model_provider,
)
except Exception:
logger.debug("[Tencent APM] Failed to record LLM metrics")
def _record_message_llm_metrics(self, trace_info: MessageTraceInfo) -> None:
"""Record LLM metrics for message traces"""
try:
trace_metadata = trace_info.metadata or {}
message_data = trace_info.message_data or {}
provider_latency = 0.0
if isinstance(message_data, dict):
provider_latency = float(message_data.get("provider_response_latency", 0.0) or 0.0)
else:
provider_latency = float(getattr(message_data, "provider_response_latency", 0.0) or 0.0)
model_provider = trace_metadata.get("ls_provider") or (
message_data.get("model_provider", "") if isinstance(message_data, dict) else ""
)
model_name = trace_metadata.get("ls_model_name") or (
message_data.get("model_id", "") if isinstance(message_data, dict) else ""
)
# Record LLM duration
if provider_latency > 0 and hasattr(self.trace_client, "record_llm_duration"):
is_streaming = trace_info.is_streaming_request
duration_attributes = {
"gen_ai.system": model_provider,
"gen_ai.response.model": model_name,
"gen_ai.operation.name": "chat", # Message traces are always chat
"stream": "true" if is_streaming else "false",
}
self.trace_client.record_llm_duration(provider_latency, duration_attributes)
# Record streaming metrics for message traces
if trace_info.is_streaming_request:
# Record time to first token
if trace_info.gen_ai_server_time_to_first_token is not None and hasattr(
self.trace_client, "record_time_to_first_token"
):
ttft_seconds = float(trace_info.gen_ai_server_time_to_first_token)
if ttft_seconds > 0:
self.trace_client.record_time_to_first_token(
ttft_seconds=ttft_seconds, provider=str(model_provider or ""), model=str(model_name or "")
)
# Record time to generate
if trace_info.llm_streaming_time_to_generate is not None and hasattr(
self.trace_client, "record_time_to_generate"
):
ttg_seconds = float(trace_info.llm_streaming_time_to_generate)
if ttg_seconds > 0:
self.trace_client.record_time_to_generate(
ttg_seconds=ttg_seconds, provider=str(model_provider or ""), model=str(model_name or "")
)
# Record token usage
if hasattr(self.trace_client, "record_token_usage"):
input_tokens = int(trace_info.message_tokens or 0)
output_tokens = int(trace_info.answer_tokens or 0)
if input_tokens > 0:
self.trace_client.record_token_usage(
token_count=input_tokens,
token_type="input",
operation_name="chat",
request_model=str(model_name or ""),
response_model=str(model_name or ""),
server_address=str(model_provider or ""),
provider=str(model_provider or ""),
)
if output_tokens > 0:
self.trace_client.record_token_usage(
token_count=output_tokens,
token_type="output",
operation_name="chat",
request_model=str(model_name or ""),
response_model=str(model_name or ""),
server_address=str(model_provider or ""),
provider=str(model_provider or ""),
)
except Exception:
logger.debug("[Tencent APM] Failed to record message LLM metrics")
def _record_workflow_trace_duration(self, trace_info: WorkflowTraceInfo) -> None:
"""Record end-to-end workflow trace duration."""
try:
if not hasattr(self.trace_client, "record_trace_duration"):
return
# Calculate duration from start_time and end_time to match span duration
if trace_info.start_time and trace_info.end_time:
duration_s = (trace_info.end_time - trace_info.start_time).total_seconds()
else:
# Fallback to workflow_run_elapsed_time if timestamps not available
duration_s = float(trace_info.workflow_run_elapsed_time)
if duration_s > 0:
attributes = {
"conversation_mode": "workflow",
"workflow_status": trace_info.workflow_run_status,
}
# Add conversation_id if available
if trace_info.conversation_id:
attributes["has_conversation"] = "true"
else:
attributes["has_conversation"] = "false"
self.trace_client.record_trace_duration(duration_s, attributes)
except Exception:
logger.debug("[Tencent APM] Failed to record workflow trace duration")
def _record_message_trace_duration(self, trace_info: MessageTraceInfo) -> None:
"""Record end-to-end message trace duration."""
try:
if not hasattr(self.trace_client, "record_trace_duration"):
return
# Calculate duration from start_time and end_time
if trace_info.start_time and trace_info.end_time:
duration = (trace_info.end_time - trace_info.start_time).total_seconds()
if duration > 0:
attributes = {
"conversation_mode": trace_info.conversation_mode,
}
# Add streaming flag if available
if hasattr(trace_info, "is_streaming_request"):
attributes["stream"] = "true" if trace_info.is_streaming_request else "false"
self.trace_client.record_trace_duration(duration, attributes)
except Exception:
logger.debug("[Tencent APM] Failed to record message trace duration")
def __del__(self):
"""Ensure proper cleanup on garbage collection."""
try:

View File

@@ -3,6 +3,7 @@ import io
import json
import logging
import re
import time
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Literal
@@ -384,6 +385,8 @@ class LLMNode(Node):
output_schema = LLMNode.fetch_structured_output_schema(
structured_output=structured_output or {},
)
request_start_time = time.perf_counter()
invoke_result = invoke_llm_with_structured_output(
provider=model_instance.provider,
model_schema=model_schema,
@@ -396,6 +399,8 @@ class LLMNode(Node):
user=user_id,
)
else:
request_start_time = time.perf_counter()
invoke_result = model_instance.invoke_llm(
prompt_messages=list(prompt_messages),
model_parameters=node_data_model.completion_params,
@@ -411,6 +416,7 @@ class LLMNode(Node):
node_id=node_id,
node_type=node_type,
reasoning_format=reasoning_format,
request_start_time=request_start_time,
)
@staticmethod
@@ -422,14 +428,20 @@ class LLMNode(Node):
node_id: str,
node_type: NodeType,
reasoning_format: Literal["separated", "tagged"] = "tagged",
request_start_time: float | None = None,
) -> Generator[NodeEventBase | LLMStructuredOutput, None, None]:
# For blocking mode
if isinstance(invoke_result, LLMResult):
duration = None
if request_start_time is not None:
duration = time.perf_counter() - request_start_time
invoke_result.usage.latency = round(duration, 3)
event = LLMNode.handle_blocking_result(
invoke_result=invoke_result,
saver=file_saver,
file_outputs=file_outputs,
reasoning_format=reasoning_format,
request_latency=duration,
)
yield event
return
@@ -441,6 +453,12 @@ class LLMNode(Node):
usage = LLMUsage.empty_usage()
finish_reason = None
full_text_buffer = io.StringIO()
# Initialize streaming metrics tracking
start_time = request_start_time if request_start_time is not None else time.perf_counter()
first_token_time = None
has_content = False
collected_structured_output = None  # Collect structured_output from streaming chunks
# Consume the invoke result and handle generator exception
try:
@@ -457,6 +475,11 @@ class LLMNode(Node):
file_saver=file_saver,
file_outputs=file_outputs,
):
# Detect first token for TTFT calculation
if text_part and not has_content:
first_token_time = time.perf_counter()
has_content = True
full_text_buffer.write(text_part)
yield StreamChunkEvent(
selector=[node_id, "text"],
@@ -489,6 +512,16 @@ class LLMNode(Node):
# Extract clean text and reasoning from <think> tags
clean_text, reasoning_content = LLMNode._split_reasoning(full_text, reasoning_format)
# Calculate streaming metrics
end_time = time.perf_counter()
total_duration = end_time - start_time
usage.latency = round(total_duration, 3)
if has_content and first_token_time:
gen_ai_server_time_to_first_token = first_token_time - start_time
llm_streaming_time_to_generate = end_time - first_token_time
usage.time_to_first_token = round(gen_ai_server_time_to_first_token, 3)
usage.time_to_generate = round(llm_streaming_time_to_generate, 3)
yield ModelInvokeCompletedEvent(
# Use clean_text for separated mode, full_text for tagged mode
text=clean_text if reasoning_format == "separated" else full_text,
@@ -1068,6 +1101,7 @@ class LLMNode(Node):
saver: LLMFileSaver,
file_outputs: list["File"],
reasoning_format: Literal["separated", "tagged"] = "tagged",
request_latency: float | None = None,
) -> ModelInvokeCompletedEvent:
buffer = io.StringIO()
for text_part in LLMNode._save_multimodal_output_and_convert_result_to_markdown(
@@ -1088,7 +1122,7 @@ class LLMNode(Node):
# Extract clean text and reasoning from <think> tags
clean_text, reasoning_content = LLMNode._split_reasoning(full_text, reasoning_format)
return ModelInvokeCompletedEvent(
event = ModelInvokeCompletedEvent(
# Use clean_text for separated mode, full_text for tagged mode
text=clean_text if reasoning_format == "separated" else full_text,
usage=invoke_result.usage,
@@ -1098,6 +1132,9 @@ class LLMNode(Node):
# Pass structured output if enabled
structured_output=getattr(invoke_result, "structured_output", None),
)
if request_latency is not None:
event.usage.latency = round(request_latency, 3)
return event
@staticmethod
def save_multimodal_image_output(
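
A self-contained sketch of the timing arithmetic the streaming path uses: `perf_counter()` is taken at request start, the first non-empty chunk stamps the time-to-first-token reference, and the end of the stream closes `time_to_generate`. The chunk generator below is a hypothetical stand-in for the model's streamed output.

```python
import time


def fake_stream():
    # Hypothetical stand-in for the model's streamed text chunks.
    for part in ["", "Hel", "lo", " world"]:
        time.sleep(0.05)
        yield part


request_start_time = time.perf_counter()
first_token_time = None
has_content = False

for text_part in fake_stream():
    if text_part and not has_content:
        first_token_time = time.perf_counter()  # first non-empty chunk => TTFT reference
        has_content = True

end_time = time.perf_counter()
latency = round(end_time - request_start_time, 3)
if has_content and first_token_time:
    time_to_first_token = round(first_token_time - request_start_time, 3)
    time_to_generate = round(end_time - first_token_time, 3)
    print(latency, time_to_first_token, time_to_generate)
```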

View File

@@ -1,5 +1,5 @@
version = 1
revision = 3
revision = 2
requires-python = ">=3.11, <3.13"
resolution-markers = [
"python_full_version >= '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'",

View File

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="120px" height="27px" viewBox="0 0 80 18" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>logo</title>
<g id="页面-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="logo" fill-rule="nonzero">
<g id="XMLID_25_" transform="translate(30.592488, 1.100000)" fill="#253554">
<path d="M30.8788968,0.6 L21.8088578,0.6 L21.8088578,1.9 L24.5604427,1.9 L24.5604427,6.7 L21.2993051,6.7 L21.2993051,8 L24.5604427,8 L24.5604427,15.9 L26.089101,15.9 L26.089101,8 L29.5540597,8 L29.5540597,15.6 L32.3056445,15.6 L32.3056445,14.3 L31.0827179,14.3 L31.0827179,0.6 L30.8788968,0.6 Z M25.9871904,6.5 L25.9871904,1.9 L29.5540597,1.9 L29.5540597,6.7 L26.089101,6.7 L26.089101,6.5 L25.9871904,6.5 Z" id="XMLID_38_"></path>
<polygon id="XMLID_14_" points="5.60508028 12.2 12.8407294 12.2 12.8407294 13.5 5.60508028 13.5"></polygon>
<path d="M0.611463304,9.8 C0.611463304,12.1 0.509552753,14 0,15.5 C0,15.6 0,15.6 0.101910551,15.6 C0.101910551,15.6 1.22292661,15.6 1.42674771,15.6 C1.93630046,13.4 1.93630046,11.6 1.93630046,10.3 L3.77069037,10.3 L3.77069037,14.3 L2.54776377,14.3 C2.44585321,14.3 2.44585321,14.3 2.44585321,14.4 L2.85349542,15.6 L5.19743808,15.6 L5.19743808,0.6 L0.713373854,0.6 L0.611463304,9.8 L0.611463304,9.8 Z M2.03821101,9.2 L2.03821101,6.2 L3.87260092,6.2 L3.87260092,9.4 L2.03821101,9.4 L2.03821101,9.2 Z M3.87260092,1.9 L3.87260092,5 L2.03821101,5 L2.03821101,1.9 L3.87260092,1.9 Z" id="XMLID_33_"></path>
<path d="M13.3502821,5.9 L15.0827615,5.9 L15.0827615,4.7 L9.88532341,4.7 C9.98723396,4.3 10.0891445,3.8 10.3948762,3.5 L14.8789404,3.5 L14.8789404,2.3 L13.6560138,2.3 C13.7579243,1.6 14.1655665,0.7 14.1655665,0.7 C14.1655665,0.6 14.1655665,0.6 14.063656,0.6 L12.9426399,0.6 L12.4330872,2.3 L10.8025184,2.3 C10.9044289,1.6 11.0063395,0.8 11.2101606,0.1 C11.2101606,0 11.2101606,0 11.10825,0 C11.0063395,0 10.1910551,0 9.88532341,0 C9.78341286,0.9 9.68150231,1.7 9.37577066,2.4 L8.4585757,2.4 L7.94902295,0.7 L6.82800689,0.7 C6.72609634,0.7 6.72609634,0.7 6.72609634,0.8 C6.72609634,0.9 6.92991744,1.7 7.23564909,2.4 L6.01272249,2.4 L6.01272249,3.6 L8.8662179,3.6 C8.76430735,4 8.6623968,4.5 8.35666515,4.8 L5.60508028,4.8 L5.60508028,6 L7.74520185,6 C6.82800689,7.2 6.01272249,7.7 5.60508028,8 C5.60508028,8.1 5.60508028,9.3 5.60508028,9.3 C5.60508028,9.4 5.70699083,9.4 5.80890138,9.3 C6.21654359,9.2 6.72609634,8.8 7.03182799,8.4 L12.025445,8.4 L12.025445,10.2 L8.15284405,10.2 L8.2547546,9.1 C8.2547546,9 8.2547546,9 8.15284405,9 C8.0509335,9 6.92991744,9 6.92991744,9 L6.82800689,11.2 C6.82800689,11.3 6.82800689,11.3 6.92991744,11.3 C7.03182799,11.3 13.6560138,11.3 13.6560138,11.3 L13.6560138,14.5 L10.7006078,14.5 C10.5986973,14.5 10.5986973,14.5 10.5986973,14.6 L11.0063395,15.8 L15.2865826,15.8 L15.2865826,10.2 L13.6560138,10.2 L13.6560138,7.8 C14.2674771,8.3 14.8789404,8.8 15.4904037,9 C15.5923142,9.1 15.6942248,9.1 15.6942248,9 C15.6942248,9 15.6942248,7.8 15.6942248,7.7 C15.0827615,7.5 14.1655665,7 13.3502821,5.9 Z M11.7197133,5.9 C11.9235344,6.4 12.3311766,6.9 12.7388188,7.2 L8.35666515,7.2 C8.76430735,6.8 8.96812845,6.3 9.37577066,5.9 L11.7197133,5.9 L11.7197133,5.9 Z" id="XMLID_30_"></path>
<path d="M22.6241422,11.3 C22.6241422,11.3 21.4012156,12.2 20.178289,13.1 L20.178289,4.7 L16.9171514,4.7 L16.9171514,6.2 L18.7515413,6.2 L18.7515413,14.3 C18.2419886,14.7 17.8343464,14.8 17.8343464,14.8 L18.7515413,15.9 L22.7260528,13 L22.6241422,11.3 C22.9298739,11.3 22.8279633,11.2 22.6241422,11.3 Z" id="XMLID_8_"></path>
<path d="M18.9553624,3.4 L20.3821101,3.4 C20.5859312,3.4 20.5859312,3.3 20.5859312,3.3 L18.5477202,0.2 L17.019062,0.2 L16.9171514,0.3 C17.019062,0.4 18.9553624,3.4 18.9553624,3.4 Z" id="XMLID_7_"></path>
<rect id="XMLID_6_" x="35.2610505" y="0.9" width="11.4139817" height="1.5"></rect>
<path d="M39.4393831,7.8 L48.4075115,7.8 L48.4075115,6.3 L33.6304817,6.3 L33.6304817,7.8 L37.7069037,7.8 C36.7897088,10 34.8534083,15.4 34.7514978,15.5 C34.7514978,15.6 34.7514978,15.6 34.8534083,15.6 L47.5922271,15.6 C47.6941377,15.6 47.6941377,15.5 47.6941377,15.5 L45.8597478,10.6 L44.3310895,10.6 C44.229179,10.6 44.229179,10.7 44.229179,10.7 C44.229179,10.8 45.5540161,14.2 45.5540161,14.2 L37.197351,14.2 L39.4393831,7.8 Z" id="XMLID_5_"></path>
</g>
<g id="XMLID_19_">
<path d="M22.5,14.7 C22.1,15.1 21.3,15.7 19.9,15.7 C19.3,15.7 18.6,15.7 18.3,15.7 C17.9,15.7 14.9,15.7 11.3,15.7 C13.9,13.2 16.1,11.1 16.3,10.9 C16.5,10.7 17,10.2 17.5,9.8 C18.5,8.9 19.3,8.8 20,8.8 C21,8.8 21.8,9.2 22.5,9.8 C23.9,11.1 23.9,13.4 22.5,14.7 M24.2,8.2 C23.2,7.1 21.7,6.4 20.1,6.4 C18.7,6.4 17.5,6.9 16.4,7.7 C16,8.1 15.4,8.5 14.9,9.1 C14.5,9.5 5.9,17.9 5.9,17.9 C6.4,18 7,18 7.5,18 C8,18 18,18 18.4,18 C19.2,18 19.8,18 20.4,17.9 C21.7,17.8 23,17.3 24.1,16.3 C26.4,14.1 26.4,10.4 24.2,8.2 Z" id="XMLID_22_" fill="#00A3FF"></path>
<path d="M10.2,7.6 C9.1,6.8 8,6.4 6.7,6.4 C5.1,6.4 3.6,7.1 2.6,8.2 C0.4,10.5 0.4,14.1 2.7,16.4 C3.7,17.3 4.7,17.8 5.9,17.9 L8.2,15.7 C7.8,15.7 7.3,15.7 6.9,15.7 C5.6,15.6 4.8,15.2 4.3,14.7 C2.9,13.3 2.9,11.1 4.2,9.7 C4.9,9 5.7,8.7 6.7,8.7 C7.3,8.7 8.2,8.8 9.1,9.7 C9.5,10.1 10.6,10.9 11,11.3 L11.1,11.3 L12.6,9.8 L12.6,9.7 C11.9,9 10.8,8.1 10.2,7.6" id="XMLID_2_" fill="#00C8DC"></path>
<path d="M20.7,5.1 C19.6,2.1 16.7,0 13.4,0 C9.5,0 6.4,2.9 5.8,6.5 C6.1,6.5 6.4,6.4 6.8,6.4 C7.2,6.4 7.7,6.5 8.1,6.5 L8.1,6.5 C8.6,4 10.8,2.2 13.4,2.2 C15.6,2.2 17.5,3.5 18.4,5.4 C18.4,5.4 18.5,5.5 18.5,5.4 C19.2,5.3 20,5.1 20.7,5.1 C20.7,5.2 20.7,5.2 20.7,5.1" id="XMLID_1_" fill="#006EFF"></path>
</g>
</g>
</g>
</svg>


View File

@@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg width="80px" height="18px" viewBox="0 0 80 18" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<title>logo</title>
<g id="页面-1" stroke="none" stroke-width="1" fill="none" fill-rule="evenodd">
<g id="logo" fill-rule="nonzero">
<g id="XMLID_25_" transform="translate(30.592488, 1.100000)" fill="#253554">
<path d="M30.8788968,0.6 L21.8088578,0.6 L21.8088578,1.9 L24.5604427,1.9 L24.5604427,6.7 L21.2993051,6.7 L21.2993051,8 L24.5604427,8 L24.5604427,15.9 L26.089101,15.9 L26.089101,8 L29.5540597,8 L29.5540597,15.6 L32.3056445,15.6 L32.3056445,14.3 L31.0827179,14.3 L31.0827179,0.6 L30.8788968,0.6 Z M25.9871904,6.5 L25.9871904,1.9 L29.5540597,1.9 L29.5540597,6.7 L26.089101,6.7 L26.089101,6.5 L25.9871904,6.5 Z" id="XMLID_38_"></path>
<polygon id="XMLID_14_" points="5.60508028 12.2 12.8407294 12.2 12.8407294 13.5 5.60508028 13.5"></polygon>
<path d="M0.611463304,9.8 C0.611463304,12.1 0.509552753,14 0,15.5 C0,15.6 0,15.6 0.101910551,15.6 C0.101910551,15.6 1.22292661,15.6 1.42674771,15.6 C1.93630046,13.4 1.93630046,11.6 1.93630046,10.3 L3.77069037,10.3 L3.77069037,14.3 L2.54776377,14.3 C2.44585321,14.3 2.44585321,14.3 2.44585321,14.4 L2.85349542,15.6 L5.19743808,15.6 L5.19743808,0.6 L0.713373854,0.6 L0.611463304,9.8 L0.611463304,9.8 Z M2.03821101,9.2 L2.03821101,6.2 L3.87260092,6.2 L3.87260092,9.4 L2.03821101,9.4 L2.03821101,9.2 Z M3.87260092,1.9 L3.87260092,5 L2.03821101,5 L2.03821101,1.9 L3.87260092,1.9 Z" id="XMLID_33_"></path>
<path d="M13.3502821,5.9 L15.0827615,5.9 L15.0827615,4.7 L9.88532341,4.7 C9.98723396,4.3 10.0891445,3.8 10.3948762,3.5 L14.8789404,3.5 L14.8789404,2.3 L13.6560138,2.3 C13.7579243,1.6 14.1655665,0.7 14.1655665,0.7 C14.1655665,0.6 14.1655665,0.6 14.063656,0.6 L12.9426399,0.6 L12.4330872,2.3 L10.8025184,2.3 C10.9044289,1.6 11.0063395,0.8 11.2101606,0.1 C11.2101606,0 11.2101606,0 11.10825,0 C11.0063395,0 10.1910551,0 9.88532341,0 C9.78341286,0.9 9.68150231,1.7 9.37577066,2.4 L8.4585757,2.4 L7.94902295,0.7 L6.82800689,0.7 C6.72609634,0.7 6.72609634,0.7 6.72609634,0.8 C6.72609634,0.9 6.92991744,1.7 7.23564909,2.4 L6.01272249,2.4 L6.01272249,3.6 L8.8662179,3.6 C8.76430735,4 8.6623968,4.5 8.35666515,4.8 L5.60508028,4.8 L5.60508028,6 L7.74520185,6 C6.82800689,7.2 6.01272249,7.7 5.60508028,8 C5.60508028,8.1 5.60508028,9.3 5.60508028,9.3 C5.60508028,9.4 5.70699083,9.4 5.80890138,9.3 C6.21654359,9.2 6.72609634,8.8 7.03182799,8.4 L12.025445,8.4 L12.025445,10.2 L8.15284405,10.2 L8.2547546,9.1 C8.2547546,9 8.2547546,9 8.15284405,9 C8.0509335,9 6.92991744,9 6.92991744,9 L6.82800689,11.2 C6.82800689,11.3 6.82800689,11.3 6.92991744,11.3 C7.03182799,11.3 13.6560138,11.3 13.6560138,11.3 L13.6560138,14.5 L10.7006078,14.5 C10.5986973,14.5 10.5986973,14.5 10.5986973,14.6 L11.0063395,15.8 L15.2865826,15.8 L15.2865826,10.2 L13.6560138,10.2 L13.6560138,7.8 C14.2674771,8.3 14.8789404,8.8 15.4904037,9 C15.5923142,9.1 15.6942248,9.1 15.6942248,9 C15.6942248,9 15.6942248,7.8 15.6942248,7.7 C15.0827615,7.5 14.1655665,7 13.3502821,5.9 Z M11.7197133,5.9 C11.9235344,6.4 12.3311766,6.9 12.7388188,7.2 L8.35666515,7.2 C8.76430735,6.8 8.96812845,6.3 9.37577066,5.9 L11.7197133,5.9 L11.7197133,5.9 Z" id="XMLID_30_"></path>
<path d="M22.6241422,11.3 C22.6241422,11.3 21.4012156,12.2 20.178289,13.1 L20.178289,4.7 L16.9171514,4.7 L16.9171514,6.2 L18.7515413,6.2 L18.7515413,14.3 C18.2419886,14.7 17.8343464,14.8 17.8343464,14.8 L18.7515413,15.9 L22.7260528,13 L22.6241422,11.3 C22.9298739,11.3 22.8279633,11.2 22.6241422,11.3 Z" id="XMLID_8_"></path>
<path d="M18.9553624,3.4 L20.3821101,3.4 C20.5859312,3.4 20.5859312,3.3 20.5859312,3.3 L18.5477202,0.2 L17.019062,0.2 L16.9171514,0.3 C17.019062,0.4 18.9553624,3.4 18.9553624,3.4 Z" id="XMLID_7_"></path>
<rect id="XMLID_6_" x="35.2610505" y="0.9" width="11.4139817" height="1.5"></rect>
<path d="M39.4393831,7.8 L48.4075115,7.8 L48.4075115,6.3 L33.6304817,6.3 L33.6304817,7.8 L37.7069037,7.8 C36.7897088,10 34.8534083,15.4 34.7514978,15.5 C34.7514978,15.6 34.7514978,15.6 34.8534083,15.6 L47.5922271,15.6 C47.6941377,15.6 47.6941377,15.5 47.6941377,15.5 L45.8597478,10.6 L44.3310895,10.6 C44.229179,10.6 44.229179,10.7 44.229179,10.7 C44.229179,10.8 45.5540161,14.2 45.5540161,14.2 L37.197351,14.2 L39.4393831,7.8 Z" id="XMLID_5_"></path>
</g>
<g id="XMLID_19_">
<path d="M22.5,14.7 C22.1,15.1 21.3,15.7 19.9,15.7 C19.3,15.7 18.6,15.7 18.3,15.7 C17.9,15.7 14.9,15.7 11.3,15.7 C13.9,13.2 16.1,11.1 16.3,10.9 C16.5,10.7 17,10.2 17.5,9.8 C18.5,8.9 19.3,8.8 20,8.8 C21,8.8 21.8,9.2 22.5,9.8 C23.9,11.1 23.9,13.4 22.5,14.7 M24.2,8.2 C23.2,7.1 21.7,6.4 20.1,6.4 C18.7,6.4 17.5,6.9 16.4,7.7 C16,8.1 15.4,8.5 14.9,9.1 C14.5,9.5 5.9,17.9 5.9,17.9 C6.4,18 7,18 7.5,18 C8,18 18,18 18.4,18 C19.2,18 19.8,18 20.4,17.9 C21.7,17.8 23,17.3 24.1,16.3 C26.4,14.1 26.4,10.4 24.2,8.2 Z" id="XMLID_22_" fill="#00A3FF"></path>
<path d="M10.2,7.6 C9.1,6.8 8,6.4 6.7,6.4 C5.1,6.4 3.6,7.1 2.6,8.2 C0.4,10.5 0.4,14.1 2.7,16.4 C3.7,17.3 4.7,17.8 5.9,17.9 L8.2,15.7 C7.8,15.7 7.3,15.7 6.9,15.7 C5.6,15.6 4.8,15.2 4.3,14.7 C2.9,13.3 2.9,11.1 4.2,9.7 C4.9,9 5.7,8.7 6.7,8.7 C7.3,8.7 8.2,8.8 9.1,9.7 C9.5,10.1 10.6,10.9 11,11.3 L11.1,11.3 L12.6,9.8 L12.6,9.7 C11.9,9 10.8,8.1 10.2,7.6" id="XMLID_2_" fill="#00C8DC"></path>
<path d="M20.7,5.1 C19.6,2.1 16.7,0 13.4,0 C9.5,0 6.4,2.9 5.8,6.5 C6.1,6.5 6.4,6.4 6.8,6.4 C7.2,6.4 7.7,6.5 8.1,6.5 L8.1,6.5 C8.6,4 10.8,2.2 13.4,2.2 C15.6,2.2 17.5,3.5 18.4,5.4 C18.4,5.4 18.5,5.5 18.5,5.4 C19.2,5.3 20,5.1 20.7,5.1 C20.7,5.2 20.7,5.2 20.7,5.1" id="XMLID_1_" fill="#006EFF"></path>
</g>
</g>
</g>
</svg>
