This commit is contained in:
Asuka Minato 2026-05-08 17:09:44 +09:00
parent 132637d6fc
commit e125f7400d
6 changed files with 362 additions and 951 deletions

View File

@ -34,29 +34,30 @@ def generate_markdown_docs(swagger_dir: Path, markdown_dir: Path, *, keep_swagge
markdown_dir.mkdir(parents=True, exist_ok=True)
written_paths: list[Path] = []
for target in SPEC_TARGETS:
swagger_path = swagger_paths_by_name[target.filename]
markdown_path = markdown_dir / f"{swagger_path.stem}.md"
subprocess.run(
[
"npx",
"--yes",
SWAGGER_MARKDOWN_PACKAGE,
"-i",
str(swagger_path),
"-o",
str(markdown_path),
],
check=True,
)
written_paths.append(markdown_path)
if not keep_swagger_json:
if swagger_dir == markdown_dir or markdown_dir.is_relative_to(swagger_dir):
for path in swagger_paths:
path.unlink()
else:
shutil.rmtree(swagger_dir)
try:
for target in SPEC_TARGETS:
swagger_path = swagger_paths_by_name[target.filename]
markdown_path = markdown_dir / f"{swagger_path.stem}.md"
subprocess.run(
[
"npx",
"--yes",
SWAGGER_MARKDOWN_PACKAGE,
"-i",
str(swagger_path),
"-o",
str(markdown_path),
],
check=True,
)
written_paths.append(markdown_path)
finally:
if not keep_swagger_json:
if swagger_dir == markdown_dir or markdown_dir.is_relative_to(swagger_dir):
for path in swagger_paths:
path.unlink(missing_ok=True)
else:
shutil.rmtree(swagger_dir, ignore_errors=True)
return written_paths

View File

@ -17,7 +17,7 @@ import sys
from collections.abc import MutableMapping
from dataclasses import dataclass
from pathlib import Path
from typing import Protocol
from typing import Protocol, TypeGuard
from flask import Flask
from flask_restx.swagger import Swagger
@ -52,6 +52,14 @@ _ORIGINAL_REGISTER_MODEL = Swagger.register_model
_ORIGINAL_REGISTER_FIELD = Swagger.register_field
def _is_inline_field_map(value: object) -> TypeGuard[dict[object, object]]:
    """Return whether a nested field map is an anonymous inline mapping."""
    # Imported locally — presumably to avoid a circular import with
    # flask_restx at module load time; confirm against the module's imports.
    from flask_restx.model import Model, OrderedModel

    if not isinstance(value, dict):
        return False
    # Model and OrderedModel are dict subclasses, so a plain isinstance(dict)
    # check is not enough: only bare dicts count as anonymous inline maps.
    return not isinstance(value, (Model, OrderedModel))
def _jsonable_schema_value(value: object) -> object:
"""Return a deterministic JSON-serializable representation for schema fingerprints."""
@ -61,7 +69,8 @@ def _jsonable_schema_value(value: object) -> object:
return [_jsonable_schema_value(item) for item in value]
if isinstance(value, dict):
return {str(key): _jsonable_schema_value(item) for key, item in value.items()}
return repr(value)
value_type = type(value)
return f"<{value_type.__module__}.{value_type.__qualname__}>"
def _field_signature(field: object) -> object:
@ -77,10 +86,14 @@ def _field_signature(field: object) -> object:
if isinstance(field_instance, fields.Nested):
nested = getattr(field_instance, "nested", None)
if isinstance(nested, dict):
if _is_inline_field_map(nested):
signature["nested"] = _inline_model_signature(nested)
else:
signature["nested"] = getattr(nested, "name", repr(nested))
signature["nested"] = getattr(
nested,
"name",
f"<{type(nested).__module__}.{type(nested).__qualname__}>",
)
elif hasattr(field_instance, "container"):
signature["container"] = _field_signature(field_instance.container)
else:
@ -167,14 +180,14 @@ def _patch_swagger_for_inline_nested_dicts() -> None:
return self.api.models[anonymous_name]
def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]:
if isinstance(model, dict):
if _is_inline_field_map(model):
model = get_or_create_inline_model(self, model)
return _ORIGINAL_REGISTER_MODEL(self, model)
def register_field_with_inline_dict_support(self: Swagger, field: object) -> None:
nested = getattr(field, "nested", None)
if isinstance(nested, dict):
if _is_inline_field_map(nested):
field.model = get_or_create_inline_model(self, nested) # type: ignore
_ORIGINAL_REGISTER_FIELD(self, field)
@ -193,13 +206,20 @@ def create_spec_app() -> Flask:
app = Flask(__name__)
from controllers.console import bp as console_bp
from controllers.console import console_ns
from controllers.service_api import bp as service_api_bp
from controllers.service_api import service_api_ns
from controllers.web import bp as web_bp
from controllers.web import web_ns
app.register_blueprint(console_bp)
app.register_blueprint(web_bp)
app.register_blueprint(service_api_bp)
for namespace in (console_ns, web_ns, service_api_ns):
for api in namespace.apis:
_materialize_inline_model_definitions(api)
return app
@ -209,8 +229,6 @@ def _registered_models(namespace: str) -> dict[str, object]:
if namespace == "console":
from controllers.console import console_ns
for api in console_ns.apis:
_materialize_inline_model_definitions(api)
models = dict(console_ns.models)
for api in console_ns.apis:
models.update(api.models)
@ -218,8 +236,6 @@ def _registered_models(namespace: str) -> dict[str, object]:
if namespace == "web":
from controllers.web import web_ns
for api in web_ns.apis:
_materialize_inline_model_definitions(api)
models = dict(web_ns.models)
for api in web_ns.apis:
models.update(api.models)
@ -227,8 +243,6 @@ def _registered_models(namespace: str) -> dict[str, object]:
if namespace == "service":
from controllers.service_api import service_api_ns
for api in service_api_ns.apis:
_materialize_inline_model_definitions(api)
models = dict(service_api_ns.models)
for api in service_api_ns.apis:
models.update(api.models)
@ -243,13 +257,38 @@ def _materialize_inline_model_definitions(api: RestxApi) -> None:
from flask_restx import fields
from flask_restx.model import Model, OrderedModel, instance
anonymous_models: dict[int, str] = {}
inline_models: dict[int, dict[object, object]] = {}
inline_model_names: dict[int, str] = {}
def collect_field(field: object) -> None:
field_instance = instance(field)
if isinstance(field_instance, fields.Nested):
nested = getattr(field_instance, "nested", None)
if _is_inline_field_map(nested) and id(nested) not in inline_models:
inline_models[id(nested)] = nested
for nested_field in nested.values():
collect_field(nested_field)
container = getattr(field_instance, "container", None)
if container is not None:
collect_field(container)
for model in list(api.models.values()):
if isinstance(model, (Model, OrderedModel)):
for field in model.values():
collect_field(field)
for nested_fields in sorted(inline_models.values(), key=_inline_model_name):
anonymous_name = _inline_model_name(nested_fields)
inline_model_names[id(nested_fields)] = anonymous_name
if anonymous_name not in api.models:
api.model(anonymous_name, nested_fields)
def model_name_for(nested_fields: dict[object, object]) -> str:
anonymous_name = anonymous_models.get(id(nested_fields))
anonymous_name = inline_model_names.get(id(nested_fields))
if anonymous_name is None:
anonymous_name = _inline_model_name(nested_fields)
anonymous_models[id(nested_fields)] = anonymous_name
inline_model_names[id(nested_fields)] = anonymous_name
if anonymous_name not in api.models:
api.model(anonymous_name, nested_fields)
return anonymous_name
@ -258,7 +297,7 @@ def _materialize_inline_model_definitions(api: RestxApi) -> None:
field_instance = instance(field)
if isinstance(field_instance, fields.Nested):
nested = getattr(field_instance, "nested", None)
if isinstance(nested, dict):
if _is_inline_field_map(nested):
field_instance.model = api.models[model_name_for(nested)] # type: ignore[attr-defined]
container = getattr(field_instance, "container", None)
@ -284,6 +323,31 @@ def _drop_null_values(value: object) -> object:
return value
def _sort_swagger_arrays(value: object, *, parent_key: str | None = None) -> object:
"""Sort order-insensitive Swagger arrays so generated Markdown is stable."""
if isinstance(value, dict):
return {key: _sort_swagger_arrays(item, parent_key=key) for key, item in value.items()}
if not isinstance(value, list):
return value
sorted_items = [_sort_swagger_arrays(item, parent_key=parent_key) for item in value]
if parent_key == "parameters":
return sorted(
sorted_items,
key=lambda item: (
item.get("in", "") if isinstance(item, dict) else "",
item.get("name", "") if isinstance(item, dict) else "",
json.dumps(item, sort_keys=True, default=str),
),
)
if parent_key in {"enum", "required", "schemes", "tags"}:
string_items = [item for item in sorted_items if isinstance(item, str)]
if len(string_items) == len(sorted_items):
return sorted(string_items)
return sorted_items
def _merge_registered_definitions(payload: dict[str, object], namespace: str) -> dict[str, object]:
"""Include registered but route-indirect models in the exported Swagger definitions."""
@ -318,6 +382,7 @@ def generate_specs(output_dir: Path) -> list[Path]:
raise RuntimeError(f"unexpected response payload for {target.route}")
payload = _merge_registered_definitions(payload, target.namespace)
payload = _drop_null_values(payload)
payload = _sort_swagger_arrays(payload)
output_path = output_dir / target.filename
output_path.write_text(json.dumps(payload, indent=2, sort_keys=True) + "\n", encoding="utf-8")

File diff suppressed because it is too large Load Diff

View File

@ -65,8 +65,8 @@ Enable or disable annotation reply feature
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| action | path | Action to perform: 'enable' or 'disable' | Yes | string |
| payload | body | | Yes | [AnnotationReplyActionPayload](#annotationreplyactionpayload) |
| action | path | Action to perform: 'enable' or 'disable' | Yes | string |
##### Responses
@ -180,8 +180,8 @@ Update an existing annotation
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| annotation_id | path | Annotation ID | Yes | string |
| payload | body | | Yes | [AnnotationCreatePayload](#annotationcreatepayload) |
| annotation_id | path | Annotation ID | Yes | string |
##### Responses
@ -390,8 +390,8 @@ Rename a conversation or auto-generate a name
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| c_id | path | Conversation ID | Yes | string |
| payload | body | | Yes | [ConversationRenamePayload](#conversationrenamepayload) |
| c_id | path | Conversation ID | Yes | string |
##### Responses
@ -417,8 +417,8 @@ Conversational variables are only available for chat applications.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| c_id | path | Conversation ID | Yes | string |
| payload | body | | Yes | [ConversationVariablesQuery](#conversationvariablesquery) |
| c_id | path | Conversation ID | Yes | string |
##### Responses
@ -445,9 +445,9 @@ The value must match the variable's expected type.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) |
| c_id | path | Conversation ID | Yes | string |
| variable_id | path | Variable ID | Yes | string |
| payload | body | | Yes | [ConversationVariableUpdatePayload](#conversationvariableupdatepayload) |
##### Responses
@ -712,8 +712,8 @@ Update an existing dataset
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [DatasetUpdatePayload](#datasetupdatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -756,8 +756,8 @@ Create a new document by providing text content
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -800,8 +800,8 @@ Deprecated legacy alias for creating a new document by providing text content. U
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [DocumentTextCreatePayload](#documenttextcreatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -843,8 +843,8 @@ Download selected uploaded documents as a single ZIP archive
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [DocumentBatchDownloadZipPayload](#documentbatchdownloadzippayload) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -870,8 +870,8 @@ Update metadata for multiple documents
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [MetadataOperationData](#metadataoperationdata) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -909,8 +909,8 @@ Raises:
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| action | path | Action to perform: 'enable', 'disable', 'archive', or 'un_archive' | Yes | string |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -933,8 +933,8 @@ Get indexing status for documents in a batch
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| batch | path | Batch ID | Yes | string |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -1046,9 +1046,9 @@ List segments in a document
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| payload | body | | Yes | [SegmentListQuery](#segmentlistquery) |
##### Responses
@ -1067,9 +1067,9 @@ Create segments in a document
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| payload | body | | Yes | [SegmentCreatePayload](#segmentcreatepayload) |
##### Responses
@ -1112,9 +1112,9 @@ Get a specific segment by ID
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| segment_id | path | | Yes | string |
| document_id | path | | Yes | string |
| dataset_id | path | | Yes | string |
| document_id | path | | Yes | string |
| segment_id | path | | Yes | string |
##### Responses
@ -1133,10 +1133,10 @@ Update a specific segment
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| segment_id | path | Segment ID to update | Yes | string |
| payload | body | | Yes | [SegmentUpdatePayload](#segmentupdatepayload) |
##### Responses
@ -1157,10 +1157,10 @@ List child chunks for a segment
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| segment_id | path | Parent segment ID | Yes | string |
| payload | body | | Yes | [ChildChunkListQuery](#childchunklistquery) |
##### Responses
@ -1179,10 +1179,10 @@ Create a new child chunk for a segment
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| segment_id | path | Parent segment ID | Yes | string |
| payload | body | | Yes | [ChildChunkCreatePayload](#childchunkcreatepayload) |
##### Responses
@ -1203,10 +1203,10 @@ Delete a specific child chunk
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| child_chunk_id | path | Child chunk ID to delete | Yes | string |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| segment_id | path | Parent segment ID | Yes | string |
| child_chunk_id | path | Child chunk ID to delete | Yes | string |
##### Responses
@ -1225,11 +1225,11 @@ Update a specific child chunk
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) |
| child_chunk_id | path | Child chunk ID to update | Yes | string |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| segment_id | path | Parent segment ID | Yes | string |
| child_chunk_id | path | Child chunk ID to update | Yes | string |
| payload | body | | Yes | [ChildChunkUpdatePayload](#childchunkupdatepayload) |
##### Responses
@ -1273,9 +1273,9 @@ Update an existing document by providing text content
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) |
##### Responses
@ -1320,9 +1320,9 @@ Deprecated legacy alias for updating an existing document by providing text cont
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) |
| dataset_id | path | Dataset ID | Yes | string |
| document_id | path | Document ID | Yes | string |
| payload | body | | Yes | [DocumentTextUpdate](#documenttextupdate) |
##### Responses
@ -1348,8 +1348,8 @@ Tests retrieval performance for the specified dataset.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -1397,8 +1397,8 @@ Create metadata for a dataset
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [MetadataArgs](#metadataargs) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -1447,8 +1447,8 @@ Enable or disable built-in metadata field
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| action | path | Action to perform: 'enable' or 'disable' | Yes | string |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -1497,9 +1497,9 @@ Update metadata name
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) |
| dataset_id | path | Dataset ID | Yes | string |
| metadata_id | path | Metadata ID | Yes | string |
| payload | body | | Yes | [MetadataUpdatePayload](#metadataupdatepayload) |
##### Responses
@ -1599,8 +1599,8 @@ Tests retrieval performance for the specified dataset.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| dataset_id | path | Dataset ID | Yes | string |
| payload | body | | Yes | [HitTestingPayload](#hittestingpayload) |
| dataset_id | path | Dataset ID | Yes | string |
##### Responses
@ -1700,8 +1700,8 @@ Files can only be accessed if they belong to messages within the requesting app'
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| file_id | path | UUID of the file to preview | Yes | string |
| payload | body | | Yes | [FilePreviewQuery](#filepreviewquery) |
| file_id | path | UUID of the file to preview | Yes | string |
##### Responses
@ -1743,8 +1743,8 @@ Submit a paused human input form by token
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| form_token | path | Human input form token | Yes | string |
| payload | body | | Yes | [HumanInputFormSubmitPayload](#humaninputformsubmitpayload) |
| form_token | path | Human input form token | Yes | string |
##### Responses
@ -1818,8 +1818,8 @@ Allows users to rate messages as like/dislike and provide optional feedback cont
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| message_id | path | Message ID | Yes | string |
| payload | body | | Yes | [MessageFeedbackPayload](#messagefeedbackpayload) |
| message_id | path | Message ID | Yes | string |
##### Responses
@ -1956,9 +1956,9 @@ Get workflow execution events stream after resume
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| task_id | path | Workflow run ID | Yes | string |
| user | query | End user identifier (query param) | No | string |
| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string |
| continue_on_pause | query | Whether to keep the stream open across workflow_paused events, specify `"true"` to keep the stream open for `workflow_paused` events. | No | string |
| include_state_snapshot | query | Whether to replay from persisted state snapshot, specify `"true"` to include a status snapshot of executed nodes | No | string |
| user | query | End user identifier (query param) | No | string |
##### Responses
@ -2090,8 +2090,8 @@ Executes a specific workflow version identified by its ID.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| workflow_id | path | Workflow ID to execute | Yes | string |
| payload | body | | Yes | [WorkflowRunPayload](#workflowrunpayload) |
| workflow_id | path | Workflow ID to execute | Yes | string |
##### Responses
@ -2216,7 +2216,7 @@ Condition detail
| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| comparison_operator | string | *Enum:* `"contains"`, `"not contains"`, `"start with"`, `"end with"`, `"is"`, `"is not"`, `"empty"`, `"not empty"`, `"in"`, `"not in"`, `"="`, `"≠"`, `">"`, `"<"`, `"≥"`, `"≤"`, `"before"`, `"after"` | Yes |
| comparison_operator | string | *Enum:* `"<"`, `"="`, `">"`, `"after"`, `"before"`, `"contains"`, `"empty"`, `"end with"`, `"in"`, `"is"`, `"is not"`, `"not contains"`, `"not empty"`, `"not in"`, `"start with"`, `"≠"`, `"≤"`, `"≥"` | Yes |
| name | string | | Yes |
| value | | | No |
@ -2226,7 +2226,7 @@ Condition detail
| ---- | ---- | ----------- | -------- |
| last_id | | Last conversation ID for pagination | No |
| limit | integer | Number of conversations to return | No |
| sort_by | string | Sort order for conversations<br>*Enum:* `"created_at"`, `"-created_at"`, `"updated_at"`, `"-updated_at"` | No |
| sort_by | string | Sort order for conversations<br>*Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No |
#### ConversationRenamePayload
@ -2460,7 +2460,7 @@ Request payload for bulk downloading documents as a zip archive.
| Name | Type | Description | Required |
| ---- | ---- | ----------- | -------- |
| name | string | | Yes |
| type | string | *Enum:* `"string"`, `"number"`, `"time"` | Yes |
| type | string | *Enum:* `"number"`, `"string"`, `"time"` | Yes |
#### MetadataDetail

View File

@ -198,8 +198,8 @@ Rename a specific conversation with a custom name or auto-generate one.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| c_id | path | Conversation UUID | Yes | string |
| name | query | New conversation name | No | string |
| auto_generate | query | Auto-generate conversation name | No | boolean |
| name | query | New conversation name | No | string |
##### Responses
@ -553,8 +553,8 @@ Submit feedback (like/dislike) for a specific message.
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| message_id | path | Message UUID | Yes | string |
| rating | query | Feedback rating | No | string |
| content | query | Feedback content | No | string |
| rating | query | Feedback rating | No | string |
##### Responses
@ -917,8 +917,8 @@ Retrieve the access mode for a web application (public or restricted).
| Name | Located in | Description | Required | Schema |
| ---- | ---------- | ----------- | -------- | ------ |
| appId | query | Application ID | No | string |
| appCode | query | Application code | No | string |
| appId | query | Application ID | No | string |
##### Responses
@ -1074,7 +1074,7 @@ Returns Server-Sent Events stream.
| last_id | | | No |
| limit | integer | | No |
| pinned | | | No |
| sort_by | string | *Enum:* `"created_at"`, `"-created_at"`, `"updated_at"`, `"-updated_at"` | No |
| sort_by | string | *Enum:* `"-created_at"`, `"-updated_at"`, `"created_at"`, `"updated_at"` | No |
#### ConversationRenamePayload

View File

@ -63,3 +63,14 @@ def test_generate_specs_writes_swagger_with_resolvable_references_and_no_nulls(t
assert refs <= set(definitions)
assert all(value is not None for value in _walk_values(payload))
def test_generate_specs_is_idempotent(tmp_path):
    """Generating the specs twice must yield byte-identical output files."""
    module = _load_generate_swagger_specs_module()

    run_one = module.generate_specs(tmp_path / "first")
    run_two = module.generate_specs(tmp_path / "second")

    # Both runs must produce the same targets, in the same order.
    assert [path.name for path in run_one] == [path.name for path in run_two]
    # And each generated file must be reproducible byte-for-byte.
    for path_a, path_b in zip(run_one, run_two):
        assert path_a.read_text(encoding="utf-8") == path_b.read_text(encoding="utf-8")