diff --git a/api/controllers/console/files.py b/api/controllers/console/files.py
index 946d3db37f..ca32d29efa 100644
--- a/api/controllers/console/files.py
+++ b/api/controllers/console/files.py
@@ -1,6 +1,7 @@
 from flask import request
 from flask_login import current_user
 from flask_restful import Resource, marshal_with
+from werkzeug.exceptions import Forbidden
 
 import services
 from configs import dify_config
@@ -58,6 +59,9 @@ class FileApi(Resource):
         if not file.filename:
             raise FilenameNotExistsError
 
+        if source == "datasets" and not current_user.is_dataset_editor:
+            raise Forbidden()
+
         if source not in ("datasets", None):
             source = None
 
diff --git a/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml b/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml
index 812b51ddcd..e0d95a830c 100644
--- a/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml
+++ b/api/core/model_runtime/model_providers/hunyuan/hunyuan.yaml
@@ -3,8 +3,8 @@ label:
   zh_Hans: 腾讯混元
   en_US: Hunyuan
 description:
-  en_US: Models provided by Tencent Hunyuan, such as hunyuan-standard, hunyuan-standard-256k, hunyuan-pro and hunyuan-lite.
-  zh_Hans: 腾讯混元提供的模型,例如 hunyuan-standard、 hunyuan-standard-256k, hunyuan-pro 和 hunyuan-lite。
+  en_US: Models provided by Tencent Hunyuan, such as hunyuan-standard, hunyuan-standard-256k, hunyuan-pro, hunyuan-role, hunyuan-large, hunyuan-large-role, hunyuan-turbo-latest, hunyuan-large-longcontext, hunyuan-turbo, hunyuan-vision, hunyuan-turbo-vision, hunyuan-functioncall and hunyuan-lite.
+  zh_Hans: 腾讯混元提供的模型,例如 hunyuan-standard、hunyuan-standard-256k、hunyuan-pro、hunyuan-role、hunyuan-large、hunyuan-large-role、hunyuan-turbo-latest、hunyuan-large-longcontext、hunyuan-turbo、hunyuan-vision、hunyuan-turbo-vision、hunyuan-functioncall 和 hunyuan-lite。
 icon_small:
   en_US: icon_s_en.png
 icon_large:
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml
index f494984443..6f589b3094 100644
--- a/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/_position.yaml
@@ -4,3 +4,10 @@
 - hunyuan-pro
 - hunyuan-turbo
 - hunyuan-vision
+- hunyuan-role
+- hunyuan-large
+- hunyuan-large-role
+- hunyuan-large-longcontext
+- hunyuan-turbo-latest
+- hunyuan-turbo-vision
+- hunyuan-functioncall
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-functioncall.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-functioncall.yaml
new file mode 100644
index 0000000000..eb8656917c
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-functioncall.yaml
@@ -0,0 +1,38 @@
+model: hunyuan-functioncall
+label:
+  zh_Hans: hunyuan-functioncall
+  en_US: hunyuan-functioncall
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 32000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.004'
+  output: '0.008'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large-longcontext.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large-longcontext.yaml
new file mode 100644
index 0000000000..c39724a3a9
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large-longcontext.yaml
@@ -0,0 +1,38 @@
+model: hunyuan-large-longcontext
+label:
+  zh_Hans: hunyuan-large-longcontext
+  en_US: hunyuan-large-longcontext
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 134000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 134000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.006'
+  output: '0.018'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large-role.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large-role.yaml
new file mode 100644
index 0000000000..1b40b35ed5
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large-role.yaml
@@ -0,0 +1,38 @@
+model: hunyuan-large-role
+label:
+  zh_Hans: hunyuan-large-role
+  en_US: hunyuan-large-role
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 32000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.004'
+  output: '0.008'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large.yaml
new file mode 100644
index 0000000000..87dc104e11
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-large.yaml
@@ -0,0 +1,38 @@
+model: hunyuan-large
+label:
+  zh_Hans: hunyuan-large
+  en_US: hunyuan-large
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 32000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.004'
+  output: '0.012'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-role.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-role.yaml
new file mode 100644
index 0000000000..0f6d2c5c44
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-role.yaml
@@ -0,0 +1,38 @@
+model: hunyuan-role
+label:
+  zh_Hans: hunyuan-role
+  en_US: hunyuan-role
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 32000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.004'
+  output: '0.008'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo-latest.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo-latest.yaml
new file mode 100644
index 0000000000..adfa3a4c1b
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo-latest.yaml
@@ -0,0 +1,38 @@
+model: hunyuan-turbo-latest
+label:
+  zh_Hans: hunyuan-turbo-latest
+  en_US: hunyuan-turbo-latest
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+model_properties:
+  mode: chat
+  context_size: 32000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 32000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.015'
+  output: '0.05'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo-vision.yaml b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo-vision.yaml
new file mode 100644
index 0000000000..5b9b17cc50
--- /dev/null
+++ b/api/core/model_runtime/model_providers/hunyuan/llm/hunyuan-turbo-vision.yaml
@@ -0,0 +1,39 @@
+model: hunyuan-turbo-vision
+label:
+  zh_Hans: hunyuan-turbo-vision
+  en_US: hunyuan-turbo-vision
+model_type: llm
+features:
+  - agent-thought
+  - tool-call
+  - multi-tool-call
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 8000
+parameter_rules:
+  - name: temperature
+    use_template: temperature
+  - name: top_p
+    use_template: top_p
+  - name: max_tokens
+    use_template: max_tokens
+    default: 1024
+    min: 1
+    max: 8000
+  - name: enable_enhance
+    label:
+      zh_Hans: 功能增强
+      en_US: Enable Enhancement
+    type: boolean
+    help:
+      zh_Hans: 功能增强(如搜索)开关,关闭时将直接由主模型生成回复内容,可以降低响应时延(对于流式输出时的首字时延尤为明显)。但在少数场景里,回复效果可能会下降。
+      en_US: Allow the model to perform external search to enhance the generation results.
+    required: false
+    default: true
+pricing:
+  input: '0.08'
+  output: '0.08'
+  unit: '0.001'
+  currency: RMB
diff --git a/api/core/model_runtime/model_providers/openai/llm/_position.yaml b/api/core/model_runtime/model_providers/openai/llm/_position.yaml
index 099aae38a6..be279d9520 100644
--- a/api/core/model_runtime/model_providers/openai/llm/_position.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/_position.yaml
@@ -1,4 +1,7 @@
-- gpt-4o-audio-preview
+- o1
+- o1-2024-12-17
+- o1-mini
+- o1-mini-2024-09-12
 - gpt-4
 - gpt-4o
 - gpt-4o-2024-05-13
@@ -7,10 +10,6 @@
 - chatgpt-4o-latest
 - gpt-4o-mini
 - gpt-4o-mini-2024-07-18
-- o1-preview
-- o1-preview-2024-09-12
-- o1-mini
-- o1-mini-2024-09-12
 - gpt-4-turbo
 - gpt-4-turbo-2024-04-09
 - gpt-4-turbo-preview
@@ -25,4 +24,7 @@
 - gpt-3.5-turbo-1106
 - gpt-3.5-turbo-0613
 - gpt-3.5-turbo-instruct
+- gpt-4o-audio-preview
+- o1-preview
+- o1-preview-2024-09-12
 - text-davinci-003
diff --git a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml
index b47449a49a..19a5399a73 100644
--- a/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/chatgpt-4o-latest.yaml
@@ -22,7 +22,7 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
     max: 16384
   - name: response_format
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml
index b630d6f630..2c86ec9460 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-05-13.yaml
@@ -22,9 +22,9 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
-    max: 4096
+    max: 16384
   - name: response_format
     label:
       zh_Hans: 回复格式
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml
index 73b7f69700..cabbe98717 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-08-06.yaml
@@ -22,7 +22,7 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
     max: 16384
   - name: response_format
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-11-20.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-11-20.yaml
index ebd5ab38c3..2c7c1c6eb5 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-11-20.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-2024-11-20.yaml
@@ -22,7 +22,7 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
     max: 16384
   - name: response_format
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-audio-preview.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-audio-preview.yaml
index 6571cd094f..e707acc507 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-audio-preview.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-audio-preview.yaml
@@ -22,9 +22,9 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
-    max: 4096
+    max: 16384
   - name: response_format
     label:
       zh_Hans: 回复格式
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml
index df38270f79..0c1b74c513 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini-2024-07-18.yaml
@@ -22,7 +22,7 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
     max: 16384
   - name: response_format
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml
index 5e3c94fbe2..0d52f06339 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o-mini.yaml
@@ -22,7 +22,7 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
     max: 16384
   - name: response_format
diff --git a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml
index 3090a9e090..a4681fe18d 100644
--- a/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml
+++ b/api/core/model_runtime/model_providers/openai/llm/gpt-4o.yaml
@@ -22,9 +22,9 @@ parameter_rules:
     use_template: frequency_penalty
   - name: max_tokens
     use_template: max_tokens
-    default: 512
+    default: 16384
     min: 1
-    max: 4096
+    max: 16384
   - name: response_format
     label:
       zh_Hans: 回复格式
@@ -38,7 +38,7 @@ parameter_rules:
     - text
     - json_object
 pricing:
-  input: '5.00'
-  output: '15.00'
+  input: '2.50'
+  output: '10.00'
   unit: '0.000001'
   currency: USD
diff --git a/api/core/model_runtime/model_providers/openai/llm/o1-2024-12-17.yaml b/api/core/model_runtime/model_providers/openai/llm/o1-2024-12-17.yaml
new file mode 100644
index 0000000000..7acbd0e2b1
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai/llm/o1-2024-12-17.yaml
@@ -0,0 +1,35 @@
+model: o1-2024-12-17
+label:
+  en_US: o1-2024-12-17
+model_type: llm
+features:
+  - multi-tool-call
+  - agent-thought
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 200000
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    default: 50000
+    min: 1
+    max: 50000
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '15.00'
+  output: '60.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/model_runtime/model_providers/openai/llm/o1.yaml b/api/core/model_runtime/model_providers/openai/llm/o1.yaml
new file mode 100644
index 0000000000..3a84cf418e
--- /dev/null
+++ b/api/core/model_runtime/model_providers/openai/llm/o1.yaml
@@ -0,0 +1,36 @@
+model: o1
+label:
+  zh_Hans: o1
+  en_US: o1
+model_type: llm
+features:
+  - multi-tool-call
+  - agent-thought
+  - stream-tool-call
+  - vision
+model_properties:
+  mode: chat
+  context_size: 200000
+parameter_rules:
+  - name: max_tokens
+    use_template: max_tokens
+    default: 50000
+    min: 1
+    max: 50000
+  - name: response_format
+    label:
+      zh_Hans: 回复格式
+      en_US: response_format
+    type: string
+    help:
+      zh_Hans: 指定模型必须输出的格式
+      en_US: specifying the format that the model must output
+    required: false
+    options:
+      - text
+      - json_object
+pricing:
+  input: '15.00'
+  output: '60.00'
+  unit: '0.000001'
+  currency: USD
diff --git a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py
index aa2bb01842..8646e52cf4 100644
--- a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py
+++ b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py
@@ -49,10 +49,10 @@ class LindormVectorStoreConfig(BaseModel):
 
 
 class LindormVectorStore(BaseVector):
-    def __init__(self, collection_name: str, config: LindormVectorStoreConfig, **kwargs):
+    def __init__(self, collection_name: str, config: LindormVectorStoreConfig, using_ugc: bool, **kwargs):
         self._routing = None
         self._routing_field = None
-        if config.using_ugc:
+        if using_ugc:
             routing_value: str = kwargs.get("routing_value")
             if routing_value is None:
                 raise ValueError("UGC index should init vector with valid 'routing_value' parameter value")
@@ -64,7 +64,7 @@ class LindormVectorStore(BaseVector):
         super().__init__(collection_name.lower())
         self._client_config = config
         self._client = OpenSearch(**config.to_opensearch_params())
-        self._using_ugc = config.using_ugc
+        self._using_ugc = using_ugc
         self.kwargs = kwargs
 
     def get_type(self) -> str:
@@ -467,12 +467,16 @@ class LindormVectorStoreFactory(AbstractVectorFactory):
         using_ugc = dify_config.USING_UGC_INDEX
         routing_value = None
         if dataset.index_struct:
-            if using_ugc:
+            # if an existing record's index_struct_dict doesn't contain the using_ugc field,
+            # it is actually stored in the normal index format
+            stored_in_ugc = dataset.index_struct_dict.get("using_ugc", False)
+            using_ugc = stored_in_ugc
+            if stored_in_ugc:
                 dimension = dataset.index_struct_dict["dimension"]
                 index_type = dataset.index_struct_dict["index_type"]
                 distance_type = dataset.index_struct_dict["distance_type"]
-                index_name = f"{UGC_INDEX_PREFIX}_{dimension}_{index_type}_{distance_type}"
                 routing_value = dataset.index_struct_dict["vector_store"]["class_prefix"]
+                index_name = f"{UGC_INDEX_PREFIX}_{dimension}_{index_type}_{distance_type}"
             else:
                 index_name = dataset.index_struct_dict["vector_store"]["class_prefix"]
         else:
@@ -487,6 +491,7 @@ class LindormVectorStoreFactory(AbstractVectorFactory):
                 "index_type": index_type,
                 "dimension": dimension,
                 "distance_type": distance_type,
+                "using_ugc": using_ugc,
             }
             dataset.index_struct = json.dumps(index_struct_dict)
         if using_ugc:
@@ -494,4 +499,4 @@
             routing_value = class_prefix
         else:
             index_name = class_prefix
-        return LindormVectorStore(index_name, lindorm_config, routing_value=routing_value)
+        return LindormVectorStore(index_name, lindorm_config, routing_value=routing_value, using_ugc=using_ugc)
diff --git a/api/extensions/storage/opendal_storage.py b/api/extensions/storage/opendal_storage.py
index 381007199a..e671eff059 100644
--- a/api/extensions/storage/opendal_storage.py
+++ b/api/extensions/storage/opendal_storage.py
@@ -34,7 +34,6 @@ class OpenDALStorage(BaseStorage):
             root = kwargs.get("root", "storage")
             Path(root).mkdir(parents=True, exist_ok=True)
 
-        # self.op = opendal.Operator(scheme=scheme, **kwargs)
         self.op = opendal.Operator(scheme=scheme, **kwargs)
         logger.debug(f"opendal operator created with scheme {scheme}")
         retry_layer = opendal.layers.RetryLayer(max_times=3, factor=2.0, jitter=True)
diff --git a/api/services/feature_service.py b/api/services/feature_service.py
index 6bd82a2757..0386c6acea 100644
--- a/api/services/feature_service.py
+++ b/api/services/feature_service.py
@@ -63,6 +63,7 @@ class SystemFeatureModel(BaseModel):
     enable_social_oauth_login: bool = False
     is_allow_register: bool = False
     is_allow_create_workspace: bool = False
+    is_email_setup: bool = False
     license: LicenseModel = LicenseModel()
 
 
@@ -98,6 +99,7 @@ class FeatureService:
         system_features.enable_social_oauth_login = dify_config.ENABLE_SOCIAL_OAUTH_LOGIN
         system_features.is_allow_register = dify_config.ALLOW_REGISTER
         system_features.is_allow_create_workspace = dify_config.ALLOW_CREATE_WORKSPACE
+        system_features.is_email_setup = dify_config.MAIL_TYPE is not None and dify_config.MAIL_TYPE != ""
 
     @classmethod
     def _fulfill_params_from_env(cls, features: FeatureModel):
diff --git a/docker/.env.example b/docker/.env.example
index cd48c9e3a1..0a5dffc570 100644
--- a/docker/.env.example
+++ b/docker/.env.example
@@ -284,8 +284,11 @@ CONSOLE_CORS_ALLOW_ORIGINS=*
 # The type of storage to use for storing user files.
 STORAGE_TYPE=opendal
 
-# Apache OpenDAL Configuration, refer to https://github.com/apache/opendal
-# The scheme for the OpenDAL storage.
+# Apache OpenDAL Configuration
+# The configuration for OpenDAL consists of the following format: OPENDAL_<SCHEME_NAME>_<CONFIG_NAME>.
+# You can find all the service configurations (CONFIG_NAME) in the repository at: https://github.com/apache/opendal/tree/main/core/src/services.
+# Dify will scan configurations starting with OPENDAL_<SCHEME_NAME> and automatically apply them.
+# The scheme name for the OpenDAL storage.
 OPENDAL_SCHEME=fs
 # Configurations for OpenDAL Local File System.
 OPENDAL_FS_ROOT=storage
@@ -919,4 +922,4 @@ MAX_SUBMIT_COUNT=100
 
 # Proxy
 HTTP_PROXY=
-HTTPS_PROXY=
\ No newline at end of file
+HTTPS_PROXY=
diff --git a/web/app/components/develop/template/template_advanced_chat.zh.mdx b/web/app/components/develop/template/template_advanced_chat.zh.mdx
index 734e52ae58..fec0636d40 100755
--- a/web/app/components/develop/template/template_advanced_chat.zh.mdx
+++ b/web/app/components/develop/template/template_advanced_chat.zh.mdx
@@ -3,7 +3,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx'
 
 # 工作流编排对话型应用 API
 
-对话应用支持会话持久化,可将之前的聊天记录作为上下进行回答,可适用于聊天/客服 AI 等。
+对话应用支持会话持久化,可将之前的聊天记录作为上下文进行回答,可适用于聊天/客服 AI 等。
 
 ### 基础 URL
 
diff --git a/web/app/components/develop/template/template_chat.zh.mdx b/web/app/components/develop/template/template_chat.zh.mdx
index 70242623b7..af96cab5ff 100644
--- a/web/app/components/develop/template/template_chat.zh.mdx
+++ b/web/app/components/develop/template/template_chat.zh.mdx
@@ -3,7 +3,7 @@ import { Row, Col, Properties, Property, Heading, SubProperty } from '../md.tsx'
 
 # 对话型应用 API
 
-对话应用支持会话持久化,可将之前的聊天记录作为上下进行回答,可适用于聊天/客服 AI 等。
+对话应用支持会话持久化,可将之前的聊天记录作为上下文进行回答,可适用于聊天/客服 AI 等。
 
 ### 基础 URL
 
diff --git a/web/app/signin/components/mail-and-password-auth.tsx b/web/app/signin/components/mail-and-password-auth.tsx
index 210c877bb7..97f8bd1bec 100644
--- a/web/app/signin/components/mail-and-password-auth.tsx
+++ b/web/app/signin/components/mail-and-password-auth.tsx
@@ -12,12 +12,13 @@ import I18NContext from '@/context/i18n'
 
 type MailAndPasswordAuthProps = {
   isInvite: boolean
+  isEmailSetup: boolean
   allowRegistration: boolean
 }
 
 const passwordRegex = /^(?=.*[a-zA-Z])(?=.*\d).{8,}$/
 
-export default function MailAndPasswordAuth({ isInvite, allowRegistration }: MailAndPasswordAuthProps) {
+export default function MailAndPasswordAuth({ isInvite, isEmailSetup, allowRegistration }: MailAndPasswordAuthProps) {
   const { t } = useTranslation()
   const { locale } = useContext(I18NContext)
   const router = useRouter()
@@ -124,7 +125,12 @@ export default function MailAndPasswordAuth({ isInvite, allowRegistration }: Mai
diff --git a/web/app/signin/normalForm.tsx b/web/app/signin/normalForm.tsx
index 783d8ac507..1911fa35c6 100644
--- a/web/app/signin/normalForm.tsx
+++ b/web/app/signin/normalForm.tsx
@@ -163,7 +163,7 @@ const NormalForm = () => {
           </div>}
         </>}
         {systemFeatures.enable_email_password_login && authType === 'password' && <>
-          <MailAndPasswordAuth isInvite={isInvite} allowRegistration={systemFeatures.is_allow_register} />
+          <MailAndPasswordAuth isInvite={isInvite} isEmailSetup={systemFeatures.is_email_setup} allowRegistration={systemFeatures.is_allow_register} />
           {systemFeatures.enable_email_code_login && <div className='cursor-pointer py-1 text-center' onClick={() => { updateAuthType('code') }}>
             <span className='system-xs-medium text-components-button-secondary-accent-text'>{t('login.useVerificationCode')}</span>
           </div>}
diff --git a/web/types/feature.ts b/web/types/feature.ts
index 47e8e1aad1..053ce3d7c9 100644
--- a/web/types/feature.ts
+++ b/web/types/feature.ts
@@ -29,6 +29,7 @@ export type SystemFeatures = {
   enable_social_oauth_login: boolean
   is_allow_create_workspace: boolean
   is_allow_register: boolean
+  is_email_setup: boolean
   license: License
 }
 
@@ -43,6 +44,7 @@ export const defaultSystemFeatures: SystemFeatures = {
   enable_social_oauth_login: false,
   is_allow_create_workspace: false,
   is_allow_register: false,
+  is_email_setup: false,
   license: {
     status: LicenseStatus.NONE,
     expired_at: '',