mirror of https://github.com/langgenius/dify.git
Fix: Remove workflow/nodes from pyright exclusion (#26461)
Co-authored-by: google-labs-jules[bot] <161369871+google-labs-jules[bot]@users.noreply.github.com>
This commit is contained in:
parent 7242a67f84
commit decf0f3da0
@@ -87,7 +87,7 @@ class Executor:
                 node_data.authorization.config.api_key
             ).text

-        self.url: str = node_data.url
+        self.url = node_data.url
         self.method = node_data.method
         self.auth = node_data.authorization
         self.timeout = timeout
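Dropping the inline `str` annotation reads as a redundancy cleanup: assuming `Executor` already declares its attributes once (a class-level `url: str` is not visible in this hunk), re-annotating the assignment in `__init__` adds nothing pyright does not already know. A minimal sketch under that assumption:

class NodeData:
    url: str = "https://example.com"

class Executor:
    url: str  # assumed single, authoritative declaration at class level

    def __init__(self, node_data: NodeData) -> None:
        self.url = node_data.url  # pyright infers `str` from the declaration above

executor = Executor(NodeData())
print(executor.url)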
@@ -349,11 +349,10 @@ class Executor:
             "timeout": (self.timeout.connect, self.timeout.read, self.timeout.write),
             "ssl_verify": self.ssl_verify,
             "follow_redirects": True,
-            "max_retries": self.max_retries,
         }
         # request_args = {k: v for k, v in request_args.items() if v is not None}
         try:
-            response: httpx.Response = _METHOD_MAP[method_lc](**request_args)
+            response: httpx.Response = _METHOD_MAP[method_lc](**request_args, max_retries=self.max_retries)
         except (ssrf_proxy.MaxRetriesExceededError, httpx.RequestError) as e:
             raise HttpRequestNodeError(str(e)) from e
         # FIXME: fix type ignore, this maybe httpx type issue
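Moving `max_retries` out of the kwargs dict and onto the call itself is a standard pyright-strict fix: a dict typed `dict[str, Any]` erases per-key types, so arguments expanded with `**` cannot be checked against the callee's signature, while an explicit keyword argument can. A sketch with stand-in names (not the real ssrf_proxy helpers):

from typing import Any

def get(url: str, *, timeout: float = 10.0, max_retries: int = 3) -> str:
    # Stand-in for an ssrf_proxy-style helper; only the signature matters here.
    return f"GET {url} timeout={timeout} max_retries={max_retries}"

request_args: dict[str, Any] = {"url": "https://example.com", "timeout": 5.0}

# A "max_retries" key inside **request_args would be invisible to pyright;
# the explicit keyword argument below is verified as `int` against the signature.
print(get(**request_args, max_retries=3))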
@@ -165,6 +165,8 @@ class HttpRequestNode(Node):
         body_type = typed_node_data.body.type
         data = typed_node_data.body.data
         match body_type:
             case "none":
                 pass
             case "binary":
+                if len(data) != 1:
+                    raise RequestBodyError("invalid body data, should have only one item")
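The guard added under `case "binary":` validates the body payload before it is used. Assuming the code after this hunk indexes `data[0]` (the rest of the case is not shown), rejecting anything but a single-item list both enforces the contract and keeps the index provably safe. A small sketch of the pattern:

class RequestBodyError(Exception):
    pass

def read_binary_selector(data: list[str]) -> str:
    # The hunk's guard: downstream code (assumed) reads data[0], so anything
    # other than exactly one item is rejected up front.
    if len(data) != 1:
        raise RequestBodyError("invalid body data, should have only one item")
    return data[0]

print(read_binary_selector(["file-selector"]))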
@@ -83,7 +83,7 @@ class IfElseNode(Node):
         else:
             # TODO: Update database then remove this
             # Fallback to old structure if cases are not defined
-            input_conditions, group_result, final_result = _should_not_use_old_function(  # ty: ignore [deprecated]
+            input_conditions, group_result, final_result = _should_not_use_old_function(  # pyright: ignore [reportDeprecated]
                 condition_processor=condition_processor,
                 variable_pool=self.graph_runtime_state.variable_pool,
                 conditions=self._node_data.conditions or [],
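The suppression comment is swapped because each type checker only honors its own syntax: `# ty: ignore [deprecated]` means nothing to pyright, whereas `# pyright: ignore [reportDeprecated]` silences exactly that rule on exactly that line. A sketch of the mechanism (the decorated function below is illustrative):

from typing_extensions import deprecated

@deprecated("Update database then remove this")
def should_not_use_old_function(conditions: list[str]) -> bool:
    return bool(conditions)

# Without the trailing comment, pyright strict reports reportDeprecated here.
ok = should_not_use_old_function(["a"])  # pyright: ignore[reportDeprecated]
print(ok)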
@@ -107,7 +107,7 @@ class KnowledgeRetrievalNode(Node):
             graph_runtime_state=graph_runtime_state,
         )
         # LLM file outputs, used for MultiModal outputs.
-        self._file_outputs: list[File] = []
+        self._file_outputs = []

         if llm_file_saver is None:
             llm_file_saver = FileSaverImpl(
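This is the same redundant-annotation cleanup as in `Executor`, and it recurs in the `LLMNode` and `QuestionClassifierNode` hunks below. It only type-checks if `_file_outputs` keeps a single declared type somewhere outside the hunk (otherwise the bare `[]` would infer an unknown element type under strict mode). A sketch under that assumption:

class File:
    pass

class KnowledgeRetrievalNodeSketch:
    _file_outputs: list[File]  # assumed single declaration outside the hunk

    def __init__(self) -> None:
        self._file_outputs = []  # still checked as list[File]

node = KnowledgeRetrievalNodeSketch()
node._file_outputs.append(File())
print(len(node._file_outputs))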
@@ -161,6 +161,8 @@ class ListOperatorNode(Node):
         elif isinstance(variable, ArrayFileSegment):
             if isinstance(condition.value, str):
                 value = self.graph_runtime_state.variable_pool.convert_template(condition.value).text
+            elif isinstance(condition.value, bool):
+                raise ValueError(f"File filter expects a string value, got {type(condition.value)}")
             else:
                 value = condition.value
             filter_func = _get_file_filter_func(
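The new `elif` arm is a type-narrowing fix: when a condition value's union includes `bool`, pyright will not let the `else` branch be used as if `bool` were impossible, so rejecting it explicitly leaves the remaining branch precisely narrowed. A sketch with an illustrative union (not the node's exact type):

def to_filter_value(value: str | bool | list[str]) -> str | list[str]:
    if isinstance(value, str):
        return value
    elif isinstance(value, bool):
        # bool would otherwise survive into the else branch; rejecting it
        # keeps the narrowing exact.
        raise ValueError(f"File filter expects a string value, got {type(value)}")
    else:
        return value  # pyright narrows this branch to list[str]

print(to_filter_value("name.txt"))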
@@ -46,7 +46,7 @@ class LLMFileSaver(tp.Protocol):
         dot (`.`). For example, `.py` and `.tar.gz` are both valid values, while `py`
         and `tar.gz` are not.
         """
-        pass
+        raise NotImplementedError()

     def save_remote_url(self, url: str, file_type: FileType) -> File:
         """save_remote_url saves the file from a remote url returned by LLM.
@@ -56,7 +56,7 @@ class LLMFileSaver(tp.Protocol):
         :param url: the url of the file.
         :param file_type: the file type of the file, check `FileType` enum for reference.
         """
-        pass
+        raise NotImplementedError()


 EngineFactory: tp.TypeAlias = tp.Callable[[], Engine]
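Both protocol methods get the same fix: under strict checking, a function whose declared return type is not `None` but whose body is only a docstring and `pass` is an error, because it implicitly returns `None`. Raising `NotImplementedError()` satisfies the declared return type while keeping the body abstract. A simplified sketch:

import typing as tp

class File:
    pass

class LLMFileSaverSketch(tp.Protocol):
    def save_remote_url(self, url: str) -> File:
        """Simplified signature; the real protocol also takes a FileType."""
        raise NotImplementedError()  # `pass` here would fail reportReturnType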
@@ -128,7 +128,7 @@ class LLMNode(Node):
             graph_runtime_state=graph_runtime_state,
         )
         # LLM file outputs, used for MultiModal outputs.
-        self._file_outputs: list[File] = []
+        self._file_outputs = []

         if llm_file_saver is None:
             llm_file_saver = FileSaverImpl(
@@ -166,6 +166,7 @@ class LLMNode(Node):
         node_inputs: dict[str, Any] = {}
         process_data: dict[str, Any] = {}
         result_text = ""
+        clean_text = ""
         usage = LLMUsage.empty_usage()
         finish_reason = None
         reasoning_content = None
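Initializing `clean_text` alongside its sibling defaults looks like a possibly-unbound fix: if the variable was previously assigned only on some code paths, pyright flags every later read. A sketch of the assumed motivation, with illustrative logic:

def collect_text(chunks: list[str], strip_reasoning: bool) -> str:
    result_text = ""
    clean_text = ""  # without this default, the final read is possibly unbound
    for chunk in chunks:
        result_text += chunk
    if strip_reasoning:
        clean_text = result_text.replace("<think>", "").replace("</think>", "")
    return clean_text or result_text

print(collect_text(["hello ", "world"], strip_reasoning=False))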
@@ -68,7 +68,7 @@ class QuestionClassifierNode(Node):
             graph_runtime_state=graph_runtime_state,
         )
         # LLM file outputs, used for MultiModal outputs.
-        self._file_outputs: list[File] = []
+        self._file_outputs = []

         if llm_file_saver is None:
             llm_file_saver = FileSaverImpl(
@@ -111,9 +111,9 @@ class QuestionClassifierNode(Node):
         query = variable.value if variable else None
         variables = {"query": query}
         # fetch model config
-        model_instance, model_config = LLMNode._fetch_model_config(
-            node_data_model=node_data.model,
+        model_instance, model_config = llm_utils.fetch_model_config(
+            tenant_id=self.tenant_id,
+            node_data_model=node_data.model,
         )
         # fetch memory
         memory = llm_utils.fetch_memory(
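The call site stops reaching into another node class's private classmethod and instead uses a shared module-level helper, passing `tenant_id` explicitly rather than relying on implicit state. A sketch of the shape of that call; the helper and types below are stand-ins, not the real `llm_utils.fetch_model_config` signature:

class ModelConfig:
    def __init__(self, tenant_id: str, name: str) -> None:
        self.tenant_id = tenant_id
        self.name = name

def fetch_model_config(*, tenant_id: str, node_data_model: str) -> tuple[str, ModelConfig]:
    # Returns a (model_instance, model_config) pair like the call site expects;
    # everything here is illustrative.
    return node_data_model, ModelConfig(tenant_id, node_data_model)

model_instance, model_config = fetch_model_config(tenant_id="tenant-1", node_data_model="gpt-4o")
print(model_instance, model_config.tenant_id)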
@@ -6,7 +6,6 @@
         "migrations/",
         "core/rag",
         "extensions",
-        "core/workflow/nodes",
         "core/app/app_config/easy_ui_based_app/dataset"
     ],
     "typeCheckingMode": "strict",