mirror of https://github.com/langgenius/dify.git, synced 2026-02-11 08:00:13 -05:00
Compare commits
40 Commits
refactor/t ... test/integ
ca9c2a62fb
f6c03362e7
e92867884f
e9db50f781
0310f631ee
abc5a61e98
5f1698add6
36e50f277f
704ee40caa
3119c99979
16b8733886
83f64104fd
a9f56716fc
94eefaaee2
59f3acb021
4655a6a244
5077879886
697b57631a
6015f23e79
f355c8d595
0142001fc2
5006a5e804
4058e9ae23
95310561ec
de33561a52
6d9665578b
5e6e8a16ce
18f14c04dc
14251b249d
0301d6b690
755c9b0c15
f1cce53bc2
a29e74422e
83ef687d00
1819bd72ef
7dabc03a08
1a050c9f86
7fb6e0cdfe
e0fcf33979
898e09264b
2 api/.vscode/launch.json.example (vendored)
@@ -54,7 +54,7 @@
"--loglevel",
"DEBUG",
"-Q",
"dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor"
"dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,workflow_based_app_execution,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor"
]
}
]
@@ -259,11 +259,20 @@ class CeleryConfig(DatabaseConfig):
description="Password of the Redis Sentinel master.",
default=None,
)

CELERY_SENTINEL_SOCKET_TIMEOUT: PositiveFloat | None = Field(
description="Timeout for Redis Sentinel socket operations in seconds.",
default=0.1,
)

CELERY_TASK_ANNOTATIONS: dict[str, Any] | None = Field(
description=(
"Annotations for Celery tasks as a JSON mapping of task name -> options "
"(for example, rate limits or other task-specific settings)."
),
default=None,
)

@computed_field
def CELERY_RESULT_BACKEND(self) -> str | None:
if self.CELERY_BACKEND in ("database", "rabbitmq"):
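For reference, a minimal sketch (the task name is illustrative, mirroring the .env.example hint shown further below) of what the CELERY_TASK_ANNOTATIONS mapping holds once the JSON value is parsed and handed to Celery:

import json

raw = '{"tasks.add": {"rate_limit": "10/s"}}'  # illustrative task name and option
annotations = json.loads(raw)
assert annotations["tasks.add"]["rate_limit"] == "10/s"
# ext_celery then passes the mapping on via task_annotations=dify_config.CELERY_TASK_ANNOTATIONS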
@@ -21,6 +21,7 @@ language_timezone_mapping = {
"th-TH": "Asia/Bangkok",
"id-ID": "Asia/Jakarta",
"ar-TN": "Africa/Tunis",
"nl-NL": "Europe/Amsterdam",
}

languages = list(language_timezone_mapping.keys())
@@ -599,7 +599,12 @@ def _get_conversation(app_model, conversation_id):
db.session.execute(
sa.update(Conversation)
.where(Conversation.id == conversation_id, Conversation.read_at.is_(None))
.values(read_at=naive_utc_now(), read_account_id=current_user.id)
# Keep updated_at unchanged when only marking a conversation as read.
.values(
read_at=naive_utc_now(),
read_account_id=current_user.id,
updated_at=Conversation.updated_at,
)
)
db.session.commit()
db.session.refresh(conversation)
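The self-referencing value in the new .values() call is what keeps the timestamp stable: a hedged sketch (model and helper names as in the hunk above) showing that SQLAlchemy compiles the column-to-itself assignment literally, so any onupdate default on updated_at never fires for this statement.

import sqlalchemy as sa

stmt = (
    sa.update(Conversation)
    .where(Conversation.id == conversation_id, Conversation.read_at.is_(None))
    .values(read_at=naive_utc_now(), read_account_id=current_user.id, updated_at=Conversation.updated_at)
)
# str(stmt) renders roughly as:
# UPDATE conversations SET read_at=:read_at, read_account_id=:read_account_id,
#        updated_at=conversations.updated_at WHERE conversations.id = :id_1 AND conversations.read_at IS NULL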
@@ -42,7 +42,15 @@ class SetupResponse(BaseModel):
tags=["console"],
)
def get_setup_status_api() -> SetupStatusResponse:
"""Get system setup status."""
"""Get system setup status.

NOTE: This endpoint is unauthenticated by design.

During first-time bootstrap there is no admin account yet, so frontend initialization must be
able to query setup progress before any login flow exists.

Only bootstrap-safe status information should be returned by this endpoint.
"""
if dify_config.EDITION == "SELF_HOSTED":
setup_status = get_setup_status()
if setup_status and not isinstance(setup_status, bool):
@@ -61,7 +69,12 @@ def get_setup_status_api() -> SetupStatusResponse:
)
@only_edition_self_hosted
def setup_system(payload: SetupRequestPayload) -> SetupResponse:
"""Initialize system setup with admin account."""
"""Initialize system setup with admin account.

NOTE: This endpoint is unauthenticated by design for first-time bootstrap.
Access is restricted by deployment mode (`SELF_HOSTED`), one-time setup guards,
and init-password validation rather than user session authentication.
"""
if get_setup_status():
raise AlreadySetupError()
@@ -34,7 +34,7 @@ def stream_topic_events(
on_subscribe()
while True:
try:
msg = sub.receive(timeout=0.1)
msg = sub.receive(timeout=1)
except SubscriptionClosedError:
return
if msg is None:
@@ -45,6 +45,8 @@ from core.app.entities.task_entities import (
from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline
from core.app.task_pipeline.message_cycle_manager import MessageCycleManager
from core.base.tts import AppGeneratorTTSPublisher, AudioTrunk
from core.file import helpers as file_helpers
from core.file.enums import FileTransferMethod
from core.model_manager import ModelInstance
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage
from core.model_runtime.entities.message_entities import (
@@ -56,10 +58,11 @@ from core.ops.entities.trace_entity import TraceTaskName
from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
from core.prompt.utils.prompt_message_util import PromptMessageUtil
from core.prompt.utils.prompt_template_parser import PromptTemplateParser
from core.tools.signature import sign_tool_file
from events.message_event import message_was_created
from extensions.ext_database import db
from libs.datetime_utils import naive_utc_now
from models.model import AppMode, Conversation, Message, MessageAgentThought
from models.model import AppMode, Conversation, Message, MessageAgentThought, MessageFile, UploadFile

logger = logging.getLogger(__name__)
@@ -463,6 +466,85 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
metadata=metadata_dict,
)

def _record_files(self):
with Session(db.engine, expire_on_commit=False) as session:
message_files = session.scalars(select(MessageFile).where(MessageFile.message_id == self._message_id)).all()
if not message_files:
return None

files_list = []
upload_file_ids = [
mf.upload_file_id
for mf in message_files
if mf.transfer_method == FileTransferMethod.LOCAL_FILE and mf.upload_file_id
]
upload_files_map = {}
if upload_file_ids:
upload_files = session.scalars(select(UploadFile).where(UploadFile.id.in_(upload_file_ids))).all()
upload_files_map = {uf.id: uf for uf in upload_files}

for message_file in message_files:
upload_file = None
if message_file.transfer_method == FileTransferMethod.LOCAL_FILE and message_file.upload_file_id:
upload_file = upload_files_map.get(message_file.upload_file_id)

url = None
filename = "file"
mime_type = "application/octet-stream"
size = 0
extension = ""

if message_file.transfer_method == FileTransferMethod.REMOTE_URL:
url = message_file.url
if message_file.url:
filename = message_file.url.split("/")[-1].split("?")[0]  # Remove query params
elif message_file.transfer_method == FileTransferMethod.LOCAL_FILE:
if upload_file:
url = file_helpers.get_signed_file_url(upload_file_id=str(upload_file.id))
filename = upload_file.name
mime_type = upload_file.mime_type or "application/octet-stream"
size = upload_file.size or 0
extension = f".{upload_file.extension}" if upload_file.extension else ""
elif message_file.upload_file_id:
# Fallback: generate URL even if upload_file not found
url = file_helpers.get_signed_file_url(upload_file_id=str(message_file.upload_file_id))
elif message_file.transfer_method == FileTransferMethod.TOOL_FILE and message_file.url:
# For tool files, use URL directly if it's HTTP, otherwise sign it
if message_file.url.startswith("http"):
url = message_file.url
filename = message_file.url.split("/")[-1].split("?")[0]
else:
# Extract tool file id and extension from URL
url_parts = message_file.url.split("/")
if url_parts:
file_part = url_parts[-1].split("?")[0]  # Remove query params first
# Use rsplit to correctly handle filenames with multiple dots
if "." in file_part:
tool_file_id, ext = file_part.rsplit(".", 1)
extension = f".{ext}"
else:
tool_file_id = file_part
extension = ".bin"
url = sign_tool_file(tool_file_id=tool_file_id, extension=extension)
filename = file_part

transfer_method_value = message_file.transfer_method
remote_url = message_file.url if message_file.transfer_method == FileTransferMethod.REMOTE_URL else ""
file_dict = {
"related_id": message_file.id,
"extension": extension,
"filename": filename,
"size": size,
"mime_type": mime_type,
"transfer_method": transfer_method_value,
"type": message_file.type,
"url": url or "",
"upload_file_id": message_file.upload_file_id or message_file.id,
"remote_url": remote_url,
}
files_list.append(file_dict)
return files_list or None

def _agent_message_to_stream_response(self, answer: str, message_id: str) -> AgentMessageStreamResponse:
"""
Agent message to stream response.
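A quick illustration of the rsplit detail called out in the comment above (plain Python, the filename is made up): only the final dot is treated as the extension separator, so tool-file names that themselves contain dots keep their full id.

file_part = "report.v2.final.pdf"  # hypothetical tool file name containing extra dots
tool_file_id, ext = file_part.rsplit(".", 1)
assert (tool_file_id, ext) == ("report.v2.final", "pdf")
# a plain split(".", 1) would instead have produced ("report", "v2.final.pdf")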
@@ -64,7 +64,13 @@ class MessageCycleManager:

# Use SQLAlchemy 2.x style session.scalar(select(...))
with session_factory.create_session() as session:
message_file = session.scalar(select(MessageFile).where(MessageFile.message_id == message_id))
message_file = session.scalar(
select(MessageFile)
.where(
MessageFile.message_id == message_id,
)
.where(MessageFile.belongs_to == "assistant")
)

if message_file:
self._message_has_file.add(message_id)
@@ -5,7 +5,7 @@ from collections.abc import Generator
from copy import deepcopy
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
if TYPE_CHECKING:  # pragma: no cover
from models.model import File

from core.tools.__base.tool_runtime import ToolRuntime
@@ -171,7 +171,7 @@ class Tool(ABC):
def create_file_message(self, file: File) -> ToolInvokeMessage:
return ToolInvokeMessage(
type=ToolInvokeMessage.MessageType.FILE,
message=ToolInvokeMessage.FileMessage(),
message=ToolInvokeMessage.FileMessage(file_marker="file_marker"),
meta={"file": file},
)
@@ -80,8 +80,14 @@ def init_app(app: DifyApp) -> Celery:
worker_hijack_root_logger=False,
timezone=pytz.timezone(dify_config.LOG_TZ or "UTC"),
task_ignore_result=True,
task_annotations=dify_config.CELERY_TASK_ANNOTATIONS,
)

if dify_config.CELERY_BACKEND == "redis":
celery_app.conf.update(
result_backend_transport_options=broker_transport_options,
)

# Apply SSL configuration if enabled
ssl_options = _get_celery_ssl_options()
if ssl_options:
@@ -119,7 +119,7 @@ class RedisClientWrapper:


redis_client: RedisClientWrapper = RedisClientWrapper()
pubsub_redis_client: RedisClientWrapper = RedisClientWrapper()
_pubsub_redis_client: redis.Redis | RedisCluster | None = None


def _get_ssl_configuration() -> tuple[type[Union[Connection, SSLConnection]], dict[str, Any]]:
@@ -232,7 +232,7 @@ def _create_standalone_client(redis_params: dict[str, Any]) -> Union[redis.Redis
return client


def _create_pubsub_client(pubsub_url: str, use_clusters: bool) -> Union[redis.Redis, RedisCluster]:
def _create_pubsub_client(pubsub_url: str, use_clusters: bool) -> redis.Redis | RedisCluster:
if use_clusters:
return RedisCluster.from_url(pubsub_url)
return redis.Redis.from_url(pubsub_url)
@@ -256,23 +256,19 @@ def init_app(app: DifyApp):
redis_client.initialize(client)
app.extensions["redis"] = redis_client

pubsub_client = client
global _pubsub_redis_client
_pubsub_redis_client = client
if dify_config.normalized_pubsub_redis_url:
pubsub_client = _create_pubsub_client(
_pubsub_redis_client = _create_pubsub_client(
dify_config.normalized_pubsub_redis_url, dify_config.PUBSUB_REDIS_USE_CLUSTERS
)
pubsub_redis_client.initialize(pubsub_client)


def get_pubsub_redis_client() -> RedisClientWrapper:
return pubsub_redis_client


def get_pubsub_broadcast_channel() -> BroadcastChannelProtocol:
redis_conn = get_pubsub_redis_client()
assert _pubsub_redis_client is not None, "PubSub redis Client should be initialized here."
if dify_config.PUBSUB_REDIS_CHANNEL_TYPE == "sharded":
return ShardedRedisBroadcastChannel(redis_conn)  # pyright: ignore[reportArgumentType]
return RedisBroadcastChannel(redis_conn)  # pyright: ignore[reportArgumentType]
return ShardedRedisBroadcastChannel(_pubsub_redis_client)
return RedisBroadcastChannel(_pubsub_redis_client)


P = ParamSpec("P")
@@ -1,6 +1,6 @@
import functools
from collections.abc import Callable
from typing import Any, TypeVar, cast
from typing import ParamSpec, TypeVar, cast

from opentelemetry.trace import get_tracer

@@ -8,7 +8,8 @@ from configs import dify_config
from extensions.otel.decorators.handler import SpanHandler
from extensions.otel.runtime import is_instrument_flag_enabled

T = TypeVar("T", bound=Callable[..., Any])
P = ParamSpec("P")
R = TypeVar("R")

_HANDLER_INSTANCES: dict[type[SpanHandler], SpanHandler] = {SpanHandler: SpanHandler()}

@@ -20,7 +21,7 @@ def _get_handler_instance(handler_class: type[SpanHandler]) -> SpanHandler:
return _HANDLER_INSTANCES[handler_class]


def trace_span(handler_class: type[SpanHandler] | None = None) -> Callable[[T], T]:
def trace_span(handler_class: type[SpanHandler] | None = None) -> Callable[[Callable[P, R]], Callable[P, R]]:
"""
Decorator that traces a function with an OpenTelemetry span.

@@ -30,9 +31,9 @@ def trace_span(handler_class: type[SpanHandler] | None = None) -> Callable[[T],
:param handler_class: Optional handler class to use for this span. If None, uses the default SpanHandler.
"""

def decorator(func: T) -> T:
def decorator(func: Callable[P, R]) -> Callable[P, R]:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
if not (dify_config.ENABLE_OTEL or is_instrument_flag_enabled()):
return func(*args, **kwargs)

@@ -46,6 +47,6 @@ def trace_span(handler_class: type[SpanHandler] | None = None) -> Callable[[T],
kwargs=kwargs,
)

return cast(T, wrapper)
return cast(Callable[P, R], wrapper)

return decorator
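A hedged usage sketch of why the ParamSpec change matters (the decorated function is illustrative): the wrapped callable now keeps its precise signature for type checkers instead of collapsing to Callable[..., Any].

@trace_span()
def add(a: int, b: int) -> int:
    return a + b

result: int = add(1, 2)  # argument and return types are preserved
# add("x") would now be reported by mypy/pyright as a type error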
@@ -1,9 +1,11 @@
import inspect
from collections.abc import Callable, Mapping
from typing import Any
from typing import Any, TypeVar

from opentelemetry.trace import SpanKind, Status, StatusCode

R = TypeVar("R")


class SpanHandler:
"""
@@ -31,9 +33,9 @@ class SpanHandler:

def _extract_arguments(
self,
wrapped: Callable[..., Any],
args: tuple[Any, ...],
kwargs: Mapping[str, Any],
wrapped: Callable[..., R],
args: tuple[object, ...],
kwargs: Mapping[str, object],
) -> dict[str, Any] | None:
"""
Extract function arguments using inspect.signature.
@@ -62,10 +64,10 @@ class SpanHandler:
def wrapper(
self,
tracer: Any,
wrapped: Callable[..., Any],
args: tuple[Any, ...],
kwargs: Mapping[str, Any],
) -> Any:
wrapped: Callable[..., R],
args: tuple[object, ...],
kwargs: Mapping[str, object],
) -> R:
"""
Fully control the wrapper behavior.
@@ -1,6 +1,6 @@
import logging
from collections.abc import Callable, Mapping
from typing import Any
from typing import Any, TypeVar

from opentelemetry.trace import SpanKind, Status, StatusCode
from opentelemetry.util.types import AttributeValue
@@ -12,16 +12,19 @@ from models.model import Account
logger = logging.getLogger(__name__)


R = TypeVar("R")


class AppGenerateHandler(SpanHandler):
"""Span handler for ``AppGenerateService.generate``."""

def wrapper(
self,
tracer: Any,
wrapped: Callable[..., Any],
args: tuple[Any, ...],
kwargs: Mapping[str, Any],
) -> Any:
wrapped: Callable[..., R],
args: tuple[object, ...],
kwargs: Mapping[str, object],
) -> R:
try:
arguments = self._extract_arguments(wrapped, args, kwargs)
if not arguments:
@@ -152,7 +152,7 @@ class RedisSubscriptionBase(Subscription):
"""Iterator for consuming messages from the subscription."""
while not self._closed.is_set():
try:
item = self._queue.get(timeout=0.1)
item = self._queue.get(timeout=1)
except queue.Empty:
continue
@@ -1,7 +1,7 @@
from __future__ import annotations

from libs.broadcast_channel.channel import Producer, Subscriber, Subscription
from redis import Redis
from redis import Redis, RedisCluster

from ._subscription import RedisSubscriptionBase

@@ -18,7 +18,7 @@ class BroadcastChannel:

def __init__(
self,
redis_client: Redis,
redis_client: Redis | RedisCluster,
):
self._client = redis_client

@@ -27,7 +27,7 @@ class BroadcastChannel:


class Topic:
def __init__(self, redis_client: Redis, topic: str):
def __init__(self, redis_client: Redis | RedisCluster, topic: str):
self._client = redis_client
self._topic = topic
@@ -70,8 +70,9 @@ class _RedisShardedSubscription(RedisSubscriptionBase):
# Since we have already filtered at the caller's site, we can safely set
# `ignore_subscribe_messages=False`.
if isinstance(self._client, RedisCluster):
# NOTE(QuantumGhost): due to an issue in upstream code, calling `get_sharded_message`
# would use busy-looping to wait for incoming message, consuming excessive CPU quota.
# NOTE(QuantumGhost): due to an issue in upstream code, calling `get_sharded_message` without
# specifying the `target_node` argument would use busy-looping to wait
# for incoming message, consuming excessive CPU quota.
#
# Here we specify the `target_node` to mitigate this problem.
node = self._client.get_node_from_key(self._topic)
@@ -80,8 +81,10 @@ class _RedisShardedSubscription(RedisSubscriptionBase):
timeout=1,
target_node=node,
)
else:
elif isinstance(self._client, Redis):
return self._pubsub.get_sharded_message(ignore_subscribe_messages=False, timeout=1)  # type: ignore[attr-defined]
else:
raise AssertionError("client should be either Redis or RedisCluster.")

def _get_message_type(self) -> str:
return "smessage"
@@ -0,0 +1,59 @@
"""add unique constraint to tenant_default_models

Revision ID: fix_tenant_default_model_unique
Revises: 9d77545f524e
Create Date: 2026-01-19 15:07:00.000000

"""
from alembic import op
import sqlalchemy as sa


def _is_pg(conn):
return conn.dialect.name == "postgresql"


# revision identifiers, used by Alembic.
revision = 'f55813ffe2c8'
down_revision = 'c3df22613c99'
branch_labels = None
depends_on = None


def upgrade():
# First, remove duplicate records keeping only the most recent one per (tenant_id, model_type)
# This is necessary before adding the unique constraint
conn = op.get_bind()

# Delete duplicates: keep the record with the latest updated_at for each (tenant_id, model_type)
# If updated_at is the same, keep the one with the largest id as tiebreaker
if _is_pg(conn):
# PostgreSQL: Use DISTINCT ON for efficient deduplication
conn.execute(sa.text("""
DELETE FROM tenant_default_models
WHERE id NOT IN (
SELECT DISTINCT ON (tenant_id, model_type) id
FROM tenant_default_models
ORDER BY tenant_id, model_type, updated_at DESC, id DESC
)
"""))
else:
# MySQL: Use self-join to find and delete duplicates
# Keep the record with latest updated_at (or largest id if updated_at is equal)
conn.execute(sa.text("""
DELETE t1 FROM tenant_default_models t1
INNER JOIN tenant_default_models t2
ON t1.tenant_id = t2.tenant_id
AND t1.model_type = t2.model_type
AND (t1.updated_at < t2.updated_at
OR (t1.updated_at = t2.updated_at AND t1.id < t2.id))
"""))

# Now add the unique constraint
with op.batch_alter_table('tenant_default_models', schema=None) as batch_op:
batch_op.create_unique_constraint('unique_tenant_default_model_type', ['tenant_id', 'model_type'])


def downgrade():
with op.batch_alter_table('tenant_default_models', schema=None) as batch_op:
batch_op.drop_constraint('unique_tenant_default_model_type', type_='unique')
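A toy illustration (plain Python, made-up rows) of the keep-the-latest rule both branches of the migration implement: for each (tenant_id, model_type) key the row with the greatest (updated_at, id) survives, and every other row is deleted before the unique constraint is created.

rows = [
    # (id, tenant_id, model_type, updated_at)
    (1, "t1", "llm", 10),
    (2, "t1", "llm", 20),  # newest for ("t1", "llm") -> kept
    (3, "t1", "embedding", 5),
]
keep = {}
for rid, tenant, mtype, updated in rows:
    key = (tenant, mtype)
    if key not in keep or (updated, rid) > (keep[key][3], keep[key][0]):
        keep[key] = (rid, tenant, mtype, updated)
to_delete = [r for r in rows if r not in keep.values()]
assert [r[0] for r in to_delete] == [1]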
@@ -227,7 +227,7 @@ class App(Base):
with Session(db.engine) as session:
if api_provider_ids:
existing_api_providers = [
api_provider.id
str(api_provider.id)
for api_provider in session.execute(
text("SELECT id FROM tool_api_providers WHERE id IN :provider_ids"),
{"provider_ids": tuple(api_provider_ids)},
@@ -181,6 +181,7 @@ class TenantDefaultModel(TypeBase):
__table_args__ = (
sa.PrimaryKeyConstraint("id", name="tenant_default_model_pkey"),
sa.Index("tenant_default_model_tenant_id_provider_type_idx", "tenant_id", "provider_name", "model_type"),
sa.UniqueConstraint("tenant_id", "model_type", name="unique_tenant_default_model_type"),
)

id: Mapped[str] = mapped_column(
@@ -1225,7 +1225,12 @@ class TenantService:

@staticmethod
def remove_member_from_tenant(tenant: Tenant, account: Account, operator: Account):
"""Remove member from tenant"""
"""Remove member from tenant.

If the removed member has ``AccountStatus.PENDING`` (invited but never
activated) and no remaining workspace memberships, the orphaned account
record is deleted as well.
"""
if operator.id == account.id:
raise CannotOperateSelfError("Cannot operate self.")

@@ -1235,9 +1240,31 @@ class TenantService:
if not ta:
raise MemberNotInTenantError("Member not in tenant.")

# Capture identifiers before any deletions; attribute access on the ORM
# object may fail after commit() expires the instance.
account_id = account.id
account_email = account.email

db.session.delete(ta)

# Clean up orphaned pending accounts (invited but never activated)
should_delete_account = False
if account.status == AccountStatus.PENDING:
# autoflush flushes ta deletion before this query, so 0 means no remaining joins
remaining_joins = db.session.query(TenantAccountJoin).filter_by(account_id=account_id).count()
if remaining_joins == 0:
db.session.delete(account)
should_delete_account = True

db.session.commit()

if should_delete_account:
logger.info(
"Deleted orphaned pending account: account_id=%s, email=%s",
account_id,
account_email,
)

if dify_config.BILLING_ENABLED:
BillingService.clean_billing_info_cache(tenant.id)
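The "capture identifiers before any deletions" comment above refers to SQLAlchemy's default expire-on-commit behavior; a hedged sketch (names follow the hunk above) of the failure mode the change avoids:

account_id = account.id  # plain string, still valid after commit
db.session.delete(account)
db.session.commit()  # commit() expires ORM instances by default
# account.id here could trigger a refresh of the deleted row and raise ObjectDeletedError
logger.info("removed account %s", account_id)  # safe: uses the captured value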
@@ -1245,13 +1272,13 @@ class TenantService:
from services.enterprise.account_deletion_sync import sync_workspace_member_removal

sync_success = sync_workspace_member_removal(
workspace_id=tenant.id, member_id=account.id, source="workspace_member_removed"
workspace_id=tenant.id, member_id=account_id, source="workspace_member_removed"
)
if not sync_success:
logger.warning(
"Enterprise workspace member removal sync failed: workspace_id=%s, member_id=%s",
tenant.id,
account.id,
account_id,
)

@staticmethod
@@ -22,7 +22,7 @@ from libs.exception import BaseHTTPException
from models.human_input import RecipientType
from models.model import App, AppMode
from repositories.factory import DifyAPIRepositoryFactory
from tasks.app_generate.workflow_execute_task import WORKFLOW_BASED_APP_EXECUTION_QUEUE, resume_app_execution
from tasks.app_generate.workflow_execute_task import resume_app_execution


class Form:
@@ -230,7 +230,6 @@ class HumanInputService:
try:
resume_app_execution.apply_async(
kwargs={"payload": payload},
queue=WORKFLOW_BASED_APP_EXECUTION_QUEUE,
)
except Exception:  # pragma: no cover
logger.exception("Failed to enqueue resume task for workflow run %s", workflow_run_id)
@@ -129,15 +129,15 @@ def build_workflow_event_stream(
return

try:
event = buffer_state.queue.get(timeout=0.1)
event = buffer_state.queue.get(timeout=1)
except queue.Empty:
current_time = time.time()
if current_time - last_msg_time > idle_timeout:
logger.debug(
"No workflow events received for %s seconds, keeping stream open",
"Idle timeout of %s seconds reached, closing workflow event stream.",
idle_timeout,
)
last_msg_time = current_time
return
if current_time - last_ping_time >= ping_interval:
yield StreamEvent.PING.value
last_ping_time = current_time
@@ -405,7 +405,7 @@ def _start_buffering(subscription) -> BufferState:
dropped_count = 0
try:
while not buffer_state.stop_event.is_set():
msg = subscription.receive(timeout=0.1)
msg = subscription.receive(timeout=1)
if msg is None:
continue
event = _parse_event_message(msg)
@@ -1,6 +1,7 @@
from flask_login import current_user

from configs import dify_config
from enums.cloud_plan import CloudPlan
from extensions.ext_database import db
from models.account import Tenant, TenantAccountJoin, TenantAccountRole
from services.account_service import TenantService
@@ -53,7 +54,12 @@ class WorkspaceService:
from services.credit_pool_service import CreditPoolService

paid_pool = CreditPoolService.get_pool(tenant_id=tenant.id, pool_type="paid")
if paid_pool:
# if the tenant is not on the sandbox plan and the paid pool is not full, use the paid pool
if (
feature.billing.subscription.plan != CloudPlan.SANDBOX
and paid_pool is not None
and (paid_pool.quota_limit == -1 or paid_pool.quota_limit > paid_pool.quota_used)
):
tenant_info["trial_credits"] = paid_pool.quota_limit
tenant_info["trial_credits_used"] = paid_pool.quota_used
else:
@@ -51,7 +51,7 @@ def _patch_redis_clients_on_loaded_modules():
continue
if hasattr(module, "redis_client"):
module.redis_client = redis_mock
if hasattr(module, "pubsub_redis_client"):
if hasattr(module, "_pubsub_redis_client"):
module.pubsub_redis_client = redis_mock


@@ -72,7 +72,7 @@ def _patch_redis_clients():

with (
patch.object(ext_redis, "redis_client", redis_mock),
patch.object(ext_redis, "pubsub_redis_client", redis_mock),
patch.object(ext_redis, "_pubsub_redis_client", redis_mock),
):
_patch_redis_clients_on_loaded_modules()
yield
@@ -0,0 +1,34 @@
from datetime import datetime
from types import SimpleNamespace
from unittest.mock import MagicMock, patch

from controllers.console.app.conversation import _get_conversation


def test_get_conversation_mark_read_keeps_updated_at_unchanged():
app_model = SimpleNamespace(id="app-id")
account = SimpleNamespace(id="account-id")
conversation = MagicMock()
conversation.id = "conversation-id"

with (
patch("controllers.console.app.conversation.current_account_with_tenant", return_value=(account, None)),
patch("controllers.console.app.conversation.naive_utc_now", return_value=datetime(2026, 2, 9, 0, 0, 0)),
patch("controllers.console.app.conversation.db.session") as mock_session,
):
mock_session.query.return_value.where.return_value.first.return_value = conversation

_get_conversation(app_model, "conversation-id")

statement = mock_session.execute.call_args[0][0]
compiled = statement.compile()
sql_text = str(compiled).lower()
compact_sql_text = sql_text.replace(" ", "")
params = compiled.params

assert "updated_at=current_timestamp" not in compact_sql_text
assert "updated_at=conversations.updated_at" in compact_sql_text
assert "read_at=:read_at" in compact_sql_text
assert "read_account_id=:read_account_id" in compact_sql_text
assert params["read_at"] == datetime(2026, 2, 9, 0, 0, 0)
assert params["read_account_id"] == "account-id"
@@ -25,15 +25,19 @@ class TestMessageCycleManagerOptimization:
task_state = Mock()
return MessageCycleManager(application_generate_entity=mock_application_generate_entity, task_state=task_state)

def test_get_message_event_type_with_message_file(self, message_cycle_manager):
"""Test get_message_event_type returns MESSAGE_FILE when message has files."""
def test_get_message_event_type_with_assistant_file(self, message_cycle_manager):
"""Test get_message_event_type returns MESSAGE_FILE when message has assistant-generated files.

This ensures that AI-generated images (belongs_to='assistant') trigger the MESSAGE_FILE event,
allowing the frontend to properly display generated image files with url field.
"""
with patch("core.app.task_pipeline.message_cycle_manager.session_factory") as mock_session_factory:
# Setup mock session and message file
mock_session = Mock()
mock_session_factory.create_session.return_value.__enter__.return_value = mock_session

mock_message_file = Mock()
# Current implementation uses session.scalar(select(...))
mock_message_file.belongs_to = "assistant"
mock_session.scalar.return_value = mock_message_file

# Execute
@@ -44,6 +48,31 @@ class TestMessageCycleManagerOptimization:
assert result == StreamEvent.MESSAGE_FILE
mock_session.scalar.assert_called_once()

def test_get_message_event_type_with_user_file(self, message_cycle_manager):
"""Test get_message_event_type returns MESSAGE when message only has user-uploaded files.

This is a regression test for the issue where user-uploaded images (belongs_to='user')
caused the LLM text response to be incorrectly tagged with MESSAGE_FILE event,
resulting in broken images in the chat UI. The query filters for belongs_to='assistant',
so when only user files exist, the database query returns None, resulting in MESSAGE event type.
"""
with patch("core.app.task_pipeline.message_cycle_manager.session_factory") as mock_session_factory:
# Setup mock session and message file
mock_session = Mock()
mock_session_factory.create_session.return_value.__enter__.return_value = mock_session

# When querying for assistant files with only user files present, return None
# (simulates database query with belongs_to='assistant' filter returning no results)
mock_session.scalar.return_value = None

# Execute
with current_app.app_context():
result = message_cycle_manager.get_message_event_type("test-message-id")

# Assert
assert result == StreamEvent.MESSAGE
mock_session.scalar.assert_called_once()

def test_get_message_event_type_without_message_file(self, message_cycle_manager):
"""Test get_message_event_type returns MESSAGE when message has no files."""
with patch("core.app.task_pipeline.message_cycle_manager.session_factory") as mock_session_factory:
@@ -69,7 +98,7 @@ class TestMessageCycleManagerOptimization:
mock_session_factory.create_session.return_value.__enter__.return_value = mock_session

mock_message_file = Mock()
# Current implementation uses session.scalar(select(...))
mock_message_file.belongs_to = "assistant"
mock_session.scalar.return_value = mock_message_file

# Execute: compute event type once, then pass to message_to_stream_response
211 api/tests/unit_tests/core/tools/test_base_tool.py (new file)
@@ -0,0 +1,211 @@
from __future__ import annotations

from collections.abc import Generator
from dataclasses import dataclass
from typing import Any, cast

from core.app.entities.app_invoke_entities import InvokeFrom
from core.tools.__base.tool import Tool
from core.tools.__base.tool_runtime import ToolRuntime
from core.tools.entities.common_entities import I18nObject
from core.tools.entities.tool_entities import ToolEntity, ToolIdentity, ToolInvokeMessage, ToolProviderType


class DummyCastType:
def cast_value(self, value: Any) -> str:
return f"cast:{value}"


@dataclass
class DummyParameter:
name: str
type: DummyCastType
form: str = "llm"
required: bool = False
default: Any = None
options: list[Any] | None = None
llm_description: str | None = None


class DummyTool(Tool):
def __init__(self, entity: ToolEntity, runtime: ToolRuntime):
super().__init__(entity=entity, runtime=runtime)
self.result: ToolInvokeMessage | list[ToolInvokeMessage] | Generator[ToolInvokeMessage, None, None] = (
self.create_text_message("default")
)
self.runtime_parameter_overrides: list[Any] | None = None
self.last_invocation: dict[str, Any] | None = None

def tool_provider_type(self) -> ToolProviderType:
return ToolProviderType.BUILT_IN

def _invoke(
self,
user_id: str,
tool_parameters: dict[str, Any],
conversation_id: str | None = None,
app_id: str | None = None,
message_id: str | None = None,
) -> ToolInvokeMessage | list[ToolInvokeMessage] | Generator[ToolInvokeMessage, None, None]:
self.last_invocation = {
"user_id": user_id,
"tool_parameters": tool_parameters,
"conversation_id": conversation_id,
"app_id": app_id,
"message_id": message_id,
}
return self.result

def get_runtime_parameters(
self,
conversation_id: str | None = None,
app_id: str | None = None,
message_id: str | None = None,
):
if self.runtime_parameter_overrides is not None:
return self.runtime_parameter_overrides
return super().get_runtime_parameters(
conversation_id=conversation_id,
app_id=app_id,
message_id=message_id,
)


def _build_tool(runtime: ToolRuntime | None = None) -> DummyTool:
entity = ToolEntity(
identity=ToolIdentity(author="test", name="dummy", label=I18nObject(en_US="dummy"), provider="test"),
parameters=[],
description=None,
has_runtime_parameters=False,
)
runtime = runtime or ToolRuntime(tenant_id="tenant-1", invoke_from=InvokeFrom.DEBUGGER, runtime_parameters={})
return DummyTool(entity=entity, runtime=runtime)


def test_invoke_supports_single_message_and_parameter_casting():
runtime = ToolRuntime(
tenant_id="tenant-1",
invoke_from=InvokeFrom.DEBUGGER,
runtime_parameters={"from_runtime": "runtime-value"},
)
tool = _build_tool(runtime)
tool.entity.parameters = cast(
Any,
[
DummyParameter(name="unused", type=DummyCastType()),
DummyParameter(name="age", type=DummyCastType()),
],
)
tool.result = tool.create_text_message("ok")

messages = list(
tool.invoke(
user_id="user-1",
tool_parameters={"age": "18", "raw": "keep"},
conversation_id="conv-1",
app_id="app-1",
message_id="msg-1",
)
)

assert len(messages) == 1
assert messages[0].message.text == "ok"
assert tool.last_invocation == {
"user_id": "user-1",
"tool_parameters": {"age": "cast:18", "raw": "keep", "from_runtime": "runtime-value"},
"conversation_id": "conv-1",
"app_id": "app-1",
"message_id": "msg-1",
}


def test_invoke_supports_list_and_generator_results():
tool = _build_tool()
tool.result = [tool.create_text_message("a"), tool.create_text_message("b")]
list_messages = list(tool.invoke(user_id="user-1", tool_parameters={}))
assert [msg.message.text for msg in list_messages] == ["a", "b"]

def _message_generator() -> Generator[ToolInvokeMessage, None, None]:
yield tool.create_text_message("g1")
yield tool.create_text_message("g2")

tool.result = _message_generator()
generated_messages = list(tool.invoke(user_id="user-2", tool_parameters={}))
assert [msg.message.text for msg in generated_messages] == ["g1", "g2"]


def test_fork_tool_runtime_returns_new_tool_with_copied_entity():
tool = _build_tool()
new_runtime = ToolRuntime(tenant_id="tenant-2", invoke_from=InvokeFrom.EXPLORE, runtime_parameters={})

forked = tool.fork_tool_runtime(new_runtime)

assert isinstance(forked, DummyTool)
assert forked is not tool
assert forked.runtime == new_runtime
assert forked.entity == tool.entity
assert forked.entity is not tool.entity


def test_get_runtime_parameters_and_merge_runtime_parameters():
tool = _build_tool()
original = DummyParameter(name="temperature", type=DummyCastType(), form="schema", required=True, default="0.7")
tool.entity.parameters = cast(Any, [original])

default_runtime_parameters = tool.get_runtime_parameters()
assert default_runtime_parameters == [original]

override = DummyParameter(name="temperature", type=DummyCastType(), form="llm", required=False, default="0.5")
appended = DummyParameter(name="new_param", type=DummyCastType(), form="form", required=False, default="x")
tool.runtime_parameter_overrides = [override, appended]

merged = tool.get_merged_runtime_parameters()
assert len(merged) == 2
assert merged[0].name == "temperature"
assert merged[0].form == "llm"
assert merged[0].required is False
assert merged[0].default == "0.5"
assert merged[1].name == "new_param"


def test_message_factory_helpers():
tool = _build_tool()

image_message = tool.create_image_message("https://example.com/image.png")
assert image_message.type == ToolInvokeMessage.MessageType.IMAGE
assert image_message.message.text == "https://example.com/image.png"

file_obj = object()
file_message = tool.create_file_message(file_obj)  # type: ignore[arg-type]
assert file_message.type == ToolInvokeMessage.MessageType.FILE
assert file_message.message.file_marker == "file_marker"
assert file_message.meta == {"file": file_obj}

link_message = tool.create_link_message("https://example.com")
assert link_message.type == ToolInvokeMessage.MessageType.LINK
assert link_message.message.text == "https://example.com"

text_message = tool.create_text_message("hello")
assert text_message.type == ToolInvokeMessage.MessageType.TEXT
assert text_message.message.text == "hello"

blob_message = tool.create_blob_message(b"blob", meta={"source": "unit-test"})
assert blob_message.type == ToolInvokeMessage.MessageType.BLOB
assert blob_message.message.blob == b"blob"
assert blob_message.meta == {"source": "unit-test"}

json_message = tool.create_json_message({"k": "v"}, suppress_output=True)
assert json_message.type == ToolInvokeMessage.MessageType.JSON
assert json_message.message.json_object == {"k": "v"}
assert json_message.message.suppress_output is True

variable_message = tool.create_variable_message("answer", 42, stream=False)
assert variable_message.type == ToolInvokeMessage.MessageType.VARIABLE
assert variable_message.message.variable_name == "answer"
assert variable_message.message.variable_value == 42
assert variable_message.message.stream is False


def test_base_abstract_invoke_placeholder_returns_none():
tool = _build_tool()
assert Tool._invoke(tool, user_id="u", tool_parameters={}) is None
@@ -255,6 +255,32 @@ def test_create_variable_message():
assert message.message.stream is False


def test_create_file_message_should_include_file_marker():
entity = ToolEntity(
identity=ToolIdentity(author="test", name="test tool", label=I18nObject(en_US="test tool"), provider="test"),
parameters=[],
description=None,
has_runtime_parameters=False,
)
runtime = ToolRuntime(tenant_id="test_tool", invoke_from=InvokeFrom.EXPLORE)
tool = WorkflowTool(
workflow_app_id="",
workflow_as_tool_id="",
version="1",
workflow_entities={},
workflow_call_depth=1,
entity=entity,
runtime=runtime,
)

file_obj = object()
message = tool.create_file_message(file_obj)  # type: ignore[arg-type]

assert message.type == ToolInvokeMessage.MessageType.FILE
assert message.message.file_marker == "file_marker"
assert message.meta == {"file": file_obj}


def test_resolve_user_from_database_falls_back_to_end_user(monkeypatch: pytest.MonkeyPatch):
"""Ensure worker context can resolve EndUser when Account is missing."""
@@ -198,6 +198,15 @@ class SubscriptionTestCase:
description: str = ""


class FakeRedisClient:
"""Minimal fake Redis client for unit tests."""

def __init__(self) -> None:
self.publish = MagicMock()
self.spublish = MagicMock()
self.pubsub = MagicMock(return_value=MagicMock())


class TestRedisSubscription:
"""Test cases for the _RedisSubscription class."""

@@ -619,10 +628,13 @@
class TestRedisShardedSubscription:
"""Test cases for the _RedisShardedSubscription class."""

@pytest.fixture(autouse=True)
def patch_sharded_redis_type(self, monkeypatch):
monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient)

@pytest.fixture
def mock_redis_client(self) -> MagicMock:
client = MagicMock()
return client
def mock_redis_client(self) -> FakeRedisClient:
return FakeRedisClient()

@pytest.fixture
def mock_pubsub(self) -> MagicMock:
@@ -636,7 +648,7 @@ class TestRedisShardedSubscription:

@pytest.fixture
def sharded_subscription(
self, mock_pubsub: MagicMock, mock_redis_client: MagicMock
self, mock_pubsub: MagicMock, mock_redis_client: FakeRedisClient
) -> Generator[_RedisShardedSubscription, None, None]:
"""Create a _RedisShardedSubscription instance for testing."""
subscription = _RedisShardedSubscription(
@@ -657,7 +669,7 @@ class TestRedisShardedSubscription:

# ==================== Lifecycle Tests ====================

def test_sharded_subscription_initialization(self, mock_pubsub: MagicMock, mock_redis_client: MagicMock):
def test_sharded_subscription_initialization(self, mock_pubsub: MagicMock, mock_redis_client: FakeRedisClient):
"""Test that sharded subscription is properly initialized."""
subscription = _RedisShardedSubscription(
client=mock_redis_client,
@@ -970,7 +982,7 @@ class TestRedisShardedSubscription:
],
)
def test_sharded_subscription_scenarios(
self, test_case: SubscriptionTestCase, mock_pubsub: MagicMock, mock_redis_client: MagicMock
self, test_case: SubscriptionTestCase, mock_pubsub: MagicMock, mock_redis_client: FakeRedisClient
):
"""Test various sharded subscription scenarios using table-driven approach."""
subscription = _RedisShardedSubscription(
@@ -1058,7 +1070,7 @@ class TestRedisShardedSubscription:
# Close should still work
sharded_subscription.close()  # Should not raise

def test_channel_name_variations(self, mock_pubsub: MagicMock, mock_redis_client: MagicMock):
def test_channel_name_variations(self, mock_pubsub: MagicMock, mock_redis_client: FakeRedisClient):
"""Test various sharded channel name formats."""
channel_names = [
"simple",
@@ -1120,10 +1132,13 @@ class TestRedisSubscriptionCommon:
"""Parameterized fixture providing subscription type and class."""
return request.param

@pytest.fixture(autouse=True)
def patch_sharded_redis_type(self, monkeypatch):
monkeypatch.setattr("libs.broadcast_channel.redis.sharded_channel.Redis", FakeRedisClient)

@pytest.fixture
def mock_redis_client(self) -> MagicMock:
client = MagicMock()
return client
def mock_redis_client(self) -> FakeRedisClient:
return FakeRedisClient()

@pytest.fixture
def mock_pubsub(self) -> MagicMock:
@@ -1140,7 +1155,7 @@ class TestRedisSubscriptionCommon:
return pubsub

@pytest.fixture
def subscription(self, subscription_params, mock_pubsub: MagicMock, mock_redis_client: MagicMock):
def subscription(self, subscription_params, mock_pubsub: MagicMock, mock_redis_client: FakeRedisClient):
"""Create a subscription instance based on parameterized type."""
subscription_type, subscription_class = subscription_params
topic_name = f"test-{subscription_type}-topic"
@@ -698,6 +698,132 @@ class TestTenantService:

self._assert_database_operations_called(mock_db_dependencies["db"])

# ==================== Member Removal Tests ====================

def test_remove_pending_member_deletes_orphaned_account(self):
"""Test that removing a pending member with no other workspaces deletes the account."""
# Arrange
mock_tenant = MagicMock()
mock_tenant.id = "tenant-456"
mock_operator = TestAccountAssociatedDataFactory.create_account_mock(account_id="operator-123", role="owner")
mock_pending_member = TestAccountAssociatedDataFactory.create_account_mock(
account_id="pending-user-789", email="pending@example.com", status=AccountStatus.PENDING
)

mock_ta = TestAccountAssociatedDataFactory.create_tenant_join_mock(
tenant_id="tenant-456", account_id="pending-user-789", role="normal"
)

with patch("services.account_service.db") as mock_db:
mock_operator_join = TestAccountAssociatedDataFactory.create_tenant_join_mock(
tenant_id="tenant-456", account_id="operator-123", role="owner"
)

query_mock_permission = MagicMock()
query_mock_permission.filter_by.return_value.first.return_value = mock_operator_join

query_mock_ta = MagicMock()
query_mock_ta.filter_by.return_value.first.return_value = mock_ta

query_mock_count = MagicMock()
query_mock_count.filter_by.return_value.count.return_value = 0

mock_db.session.query.side_effect = [query_mock_permission, query_mock_ta, query_mock_count]

with patch("services.enterprise.account_deletion_sync.sync_workspace_member_removal") as mock_sync:
mock_sync.return_value = True

# Act
TenantService.remove_member_from_tenant(mock_tenant, mock_pending_member, mock_operator)

# Assert: enterprise sync still receives the correct member ID
mock_sync.assert_called_once_with(
workspace_id="tenant-456",
member_id="pending-user-789",
source="workspace_member_removed",
)

# Assert: both join record and account should be deleted
mock_db.session.delete.assert_any_call(mock_ta)
mock_db.session.delete.assert_any_call(mock_pending_member)
assert mock_db.session.delete.call_count == 2

def test_remove_pending_member_keeps_account_with_other_workspaces(self):
"""Test that removing a pending member who belongs to other workspaces preserves the account."""
# Arrange
mock_tenant = MagicMock()
mock_tenant.id = "tenant-456"
mock_operator = TestAccountAssociatedDataFactory.create_account_mock(account_id="operator-123", role="owner")
mock_pending_member = TestAccountAssociatedDataFactory.create_account_mock(
account_id="pending-user-789", email="pending@example.com", status=AccountStatus.PENDING
)

mock_ta = TestAccountAssociatedDataFactory.create_tenant_join_mock(
tenant_id="tenant-456", account_id="pending-user-789", role="normal"
)

with patch("services.account_service.db") as mock_db:
mock_operator_join = TestAccountAssociatedDataFactory.create_tenant_join_mock(
tenant_id="tenant-456", account_id="operator-123", role="owner"
)

query_mock_permission = MagicMock()
query_mock_permission.filter_by.return_value.first.return_value = mock_operator_join

query_mock_ta = MagicMock()
query_mock_ta.filter_by.return_value.first.return_value = mock_ta

# Remaining join count = 1 (still in another workspace)
query_mock_count = MagicMock()
query_mock_count.filter_by.return_value.count.return_value = 1

mock_db.session.query.side_effect = [query_mock_permission, query_mock_ta, query_mock_count]

with patch("services.enterprise.account_deletion_sync.sync_workspace_member_removal") as mock_sync:
mock_sync.return_value = True

# Act
TenantService.remove_member_from_tenant(mock_tenant, mock_pending_member, mock_operator)

# Assert: only the join record should be deleted, not the account
mock_db.session.delete.assert_called_once_with(mock_ta)

def test_remove_active_member_preserves_account(self):
"""Test that removing an active member never deletes the account, even with no other workspaces."""
# Arrange
mock_tenant = MagicMock()
mock_tenant.id = "tenant-456"
mock_operator = TestAccountAssociatedDataFactory.create_account_mock(account_id="operator-123", role="owner")
mock_active_member = TestAccountAssociatedDataFactory.create_account_mock(
account_id="active-user-789", email="active@example.com", status=AccountStatus.ACTIVE
)

mock_ta = TestAccountAssociatedDataFactory.create_tenant_join_mock(
tenant_id="tenant-456", account_id="active-user-789", role="normal"
)

with patch("services.account_service.db") as mock_db:
mock_operator_join = TestAccountAssociatedDataFactory.create_tenant_join_mock(
tenant_id="tenant-456", account_id="operator-123", role="owner"
)

query_mock_permission = MagicMock()
query_mock_permission.filter_by.return_value.first.return_value = mock_operator_join

query_mock_ta = MagicMock()
query_mock_ta.filter_by.return_value.first.return_value = mock_ta

mock_db.session.query.side_effect = [query_mock_permission, query_mock_ta]

with patch("services.enterprise.account_deletion_sync.sync_workspace_member_removal") as mock_sync:
mock_sync.return_value = True

# Act
TenantService.remove_member_from_tenant(mock_tenant, mock_active_member, mock_operator)

# Assert: only the join record should be deleted
mock_db.session.delete.assert_called_once_with(mock_ta)

# ==================== Tenant Switching Tests ====================

def test_switch_tenant_success(self):
@@ -17,7 +17,6 @@ from core.workflow.nodes.human_input.entities import (
from core.workflow.nodes.human_input.enums import FormInputType, HumanInputFormKind, HumanInputFormStatus
from models.human_input import RecipientType
from services.human_input_service import Form, FormExpiredError, HumanInputService, InvalidFormDataError
from tasks.app_generate.workflow_execute_task import WORKFLOW_BASED_APP_EXECUTION_QUEUE


@pytest.fixture
@@ -88,7 +87,6 @@ def test_enqueue_resume_dispatches_task_for_workflow(mocker, mock_session_factor

resume_task.apply_async.assert_called_once()
call_kwargs = resume_task.apply_async.call_args.kwargs
assert call_kwargs["queue"] == WORKFLOW_BASED_APP_EXECUTION_QUEUE
assert call_kwargs["kwargs"]["payload"]["workflow_run_id"] == "workflow-run-id"


@@ -130,7 +128,6 @@ def test_enqueue_resume_dispatches_task_for_advanced_chat(mocker, mock_session_f

resume_task.apply_async.assert_called_once()
call_kwargs = resume_task.apply_async.call_args.kwargs
assert call_kwargs["queue"] == WORKFLOW_BASED_APP_EXECUTION_QUEUE
assert call_kwargs["kwargs"]["payload"]["workflow_run_id"] == "workflow-run-id"
@@ -106,10 +106,10 @@ if [[ -z "${QUEUES}" ]]; then
# Configure queues based on edition
if [[ "${EDITION}" == "CLOUD" ]]; then
# Cloud edition: separate queues for dataset and trigger tasks
QUEUES="dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow_professional,workflow_team,workflow_sandbox,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention"
QUEUES="dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow_professional,workflow_team,workflow_sandbox,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention,workflow_based_app_execution"
else
# Community edition (SELF_HOSTED): dataset and workflow have separate queues
QUEUES="dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention"
QUEUES="dataset,priority_dataset,priority_pipeline,pipeline,mail,ops_trace,app_deletion,plugin,workflow_storage,conversation,workflow,schedule_poller,schedule_executor,triggered_workflow_dispatcher,trigger_refresh_executor,retention,workflow_based_app_execution"
fi

echo "No queues specified, using edition-based defaults: ${QUEUES}"
@@ -62,6 +62,9 @@ LANG=C.UTF-8
LC_ALL=C.UTF-8
PYTHONIOENCODING=utf-8

# Set UV cache directory to avoid permission issues with non-existent home directory
UV_CACHE_DIR=/tmp/.uv-cache

# ------------------------------
# Server Configuration
# ------------------------------
@@ -384,6 +387,8 @@ CELERY_USE_SENTINEL=false
CELERY_SENTINEL_MASTER_NAME=
CELERY_SENTINEL_PASSWORD=
CELERY_SENTINEL_SOCKET_TIMEOUT=0.1
# e.g. {"tasks.add": {"rate_limit": "10/s"}}
CELERY_TASK_ANNOTATIONS=null

# ------------------------------
# CORS Configuration
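For reference, a minimal sketch of how a CELERY_TASK_ANNOTATIONS value like the one above could be consumed on the Celery side, assuming the JSON string is parsed into a dict before being assigned; the app object here is illustrative, and only the setting name and the example value come from the lines above.

    import json

    from celery import Celery

    celery_app = Celery("dify")  # illustrative app, not the project's factory

    # Example value from the .env comment above.
    raw_annotations = '{"tasks.add": {"rate_limit": "10/s"}}'

    # Celery reads per-task options (e.g. rate limits) from task_annotations.
    celery_app.conf.task_annotations = json.loads(raw_annotations)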
@@ -16,6 +16,7 @@ x-shared-env: &shared-api-worker-env
  LANG: ${LANG:-C.UTF-8}
  LC_ALL: ${LC_ALL:-C.UTF-8}
  PYTHONIOENCODING: ${PYTHONIOENCODING:-utf-8}
  UV_CACHE_DIR: ${UV_CACHE_DIR:-/tmp/.uv-cache}
  LOG_LEVEL: ${LOG_LEVEL:-INFO}
  LOG_OUTPUT_FORMAT: ${LOG_OUTPUT_FORMAT:-text}
  LOG_FILE: ${LOG_FILE:-/app/logs/server.log}
@@ -105,6 +106,7 @@ x-shared-env: &shared-api-worker-env
  CELERY_SENTINEL_MASTER_NAME: ${CELERY_SENTINEL_MASTER_NAME:-}
  CELERY_SENTINEL_PASSWORD: ${CELERY_SENTINEL_PASSWORD:-}
  CELERY_SENTINEL_SOCKET_TIMEOUT: ${CELERY_SENTINEL_SOCKET_TIMEOUT:-0.1}
  CELERY_TASK_ANNOTATIONS: ${CELERY_TASK_ANNOTATIONS:-null}
  WEB_API_CORS_ALLOW_ORIGINS: ${WEB_API_CORS_ALLOW_ORIGINS:-*}
  CONSOLE_CORS_ALLOW_ORIGINS: ${CONSOLE_CORS_ALLOW_ORIGINS:-*}
  COOKIE_DOMAIN: ${COOKIE_DOMAIN:-}
sdks/nodejs-client/pnpm-lock.yaml (generated, 8 lines changed)
@@ -10,7 +10,7 @@ importers:
    dependencies:
      axios:
        specifier: ^1.13.2
        version: 1.13.2
        version: 1.13.5
    devDependencies:
      '@eslint/js':
        specifier: ^9.39.2
@@ -544,8 +544,8 @@ packages:
  asynckit@0.4.0:
    resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==}

  axios@1.13.2:
    resolution: {integrity: sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==}
  axios@1.13.5:
    resolution: {integrity: sha512-cz4ur7Vb0xS4/KUN0tPWe44eqxrIu31me+fbang3ijiNscE129POzipJJA6zniq2C/Z6sJCjMimjS8Lc/GAs8Q==}

  balanced-match@1.0.2:
    resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==}
@@ -1677,7 +1677,7 @@ snapshots:

  asynckit@0.4.0: {}

  axios@1.13.2:
  axios@1.13.5:
    dependencies:
      follow-redirects: 1.15.11
      form-data: 4.0.5
web/__tests__/datasets/create-dataset-flow.test.tsx (new file, 301 lines)
@@ -0,0 +1,301 @@
/**
 * Integration Test: Create Dataset Flow
 *
 * Tests cross-module data flow: step-one data → step-two hooks → creation params → API call
 * Validates data contracts between steps.
 */

import type { CustomFile } from '@/models/datasets'
import type { RetrievalConfig } from '@/types/app'
import { act, renderHook } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { ChunkingMode, DataSourceType, ProcessMode } from '@/models/datasets'
import { RETRIEVE_METHOD } from '@/types/app'

const mockCreateFirstDocument = vi.fn()
const mockCreateDocument = vi.fn()
vi.mock('@/service/knowledge/use-create-dataset', () => ({
  useCreateFirstDocument: () => ({ mutateAsync: mockCreateFirstDocument, isPending: false }),
  useCreateDocument: () => ({ mutateAsync: mockCreateDocument, isPending: false }),
  getNotionInfo: (pages: { page_id: string }[], credentialId: string) => ({
    workspace_id: 'ws-1',
    pages: pages.map(p => p.page_id),
    notion_credential_id: credentialId,
  }),
  getWebsiteInfo: (opts: { websitePages: { url: string }[], websiteCrawlProvider: string }) => ({
    urls: opts.websitePages.map(p => p.url),
    only_main_content: true,
    provider: opts.websiteCrawlProvider,
  }),
}))

vi.mock('@/service/knowledge/use-dataset', () => ({
  useInvalidDatasetList: () => vi.fn(),
}))

vi.mock('@/app/components/base/toast', () => ({
  default: { notify: vi.fn() },
}))

vi.mock('@/app/components/base/amplitude', () => ({
  trackEvent: vi.fn(),
}))

// Import hooks after mocks
const { useSegmentationState, DEFAULT_SEGMENT_IDENTIFIER, DEFAULT_MAXIMUM_CHUNK_LENGTH, DEFAULT_OVERLAP }
  = await import('@/app/components/datasets/create/step-two/hooks')
const { useDocumentCreation, IndexingType }
  = await import('@/app/components/datasets/create/step-two/hooks')

const createMockFile = (overrides?: Partial<CustomFile>): CustomFile => ({
  id: 'file-1',
  name: 'test.txt',
  type: 'text/plain',
  size: 1024,
  extension: '.txt',
  mime_type: 'text/plain',
  created_at: 0,
  created_by: '',
  ...overrides,
} as CustomFile)

describe('Create Dataset Flow - Cross-Step Data Contract', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe('Step-One → Step-Two: Segmentation Defaults', () => {
|
||||
it('should initialise with correct default segmentation values', () => {
|
||||
const { result } = renderHook(() => useSegmentationState())
|
||||
expect(result.current.segmentIdentifier).toBe(DEFAULT_SEGMENT_IDENTIFIER)
|
||||
expect(result.current.maxChunkLength).toBe(DEFAULT_MAXIMUM_CHUNK_LENGTH)
|
||||
expect(result.current.overlap).toBe(DEFAULT_OVERLAP)
|
||||
expect(result.current.segmentationType).toBe(ProcessMode.general)
|
||||
})
|
||||
|
||||
it('should produce valid process rule for general chunking', () => {
|
||||
const { result } = renderHook(() => useSegmentationState())
|
||||
const processRule = result.current.getProcessRule(ChunkingMode.text)
|
||||
|
||||
// mode should be segmentationType = ProcessMode.general = 'custom'
|
||||
expect(processRule.mode).toBe('custom')
|
||||
expect(processRule.rules.segmentation).toEqual({
|
||||
separator: '\n\n', // unescaped from \\n\\n
|
||||
max_tokens: DEFAULT_MAXIMUM_CHUNK_LENGTH,
|
||||
chunk_overlap: DEFAULT_OVERLAP,
|
||||
})
|
||||
// rules is empty initially since no default config loaded
|
||||
expect(processRule.rules.pre_processing_rules).toEqual([])
|
||||
})
|
||||
|
||||
it('should produce valid process rule for parent-child chunking', () => {
|
||||
const { result } = renderHook(() => useSegmentationState())
|
||||
const processRule = result.current.getProcessRule(ChunkingMode.parentChild)
|
||||
|
||||
expect(processRule.mode).toBe('hierarchical')
|
||||
expect(processRule.rules.parent_mode).toBe('paragraph')
|
||||
expect(processRule.rules.segmentation).toEqual({
|
||||
separator: '\n\n',
|
||||
max_tokens: 1024,
|
||||
})
|
||||
expect(processRule.rules.subchunk_segmentation).toEqual({
|
||||
separator: '\n',
|
||||
max_tokens: 512,
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('Step-Two → Creation API: Params Building', () => {
|
||||
it('should build valid creation params for file upload workflow', () => {
|
||||
const files = [createMockFile()]
|
||||
const { result: segResult } = renderHook(() => useSegmentationState())
|
||||
const { result: creationResult } = renderHook(() =>
|
||||
useDocumentCreation({
|
||||
dataSourceType: DataSourceType.FILE,
|
||||
files,
|
||||
notionPages: [],
|
||||
notionCredentialId: '',
|
||||
websitePages: [],
|
||||
}),
|
||||
)
|
||||
|
||||
const processRule = segResult.current.getProcessRule(ChunkingMode.text)
|
||||
const retrievalConfig: RetrievalConfig = {
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
}
|
||||
|
||||
const params = creationResult.current.buildCreationParams(
|
||||
ChunkingMode.text,
|
||||
'English',
|
||||
processRule,
|
||||
retrievalConfig,
|
||||
{ provider: 'openai', model: 'text-embedding-ada-002' },
|
||||
IndexingType.QUALIFIED,
|
||||
)
|
||||
|
||||
expect(params).not.toBeNull()
|
||||
// File IDs come from file.id (not file.file.id)
|
||||
expect(params!.data_source.type).toBe(DataSourceType.FILE)
|
||||
expect(params!.data_source.info_list.file_info_list?.file_ids).toContain('file-1')
|
||||
|
||||
expect(params!.indexing_technique).toBe(IndexingType.QUALIFIED)
|
||||
expect(params!.doc_form).toBe(ChunkingMode.text)
|
||||
expect(params!.doc_language).toBe('English')
|
||||
expect(params!.embedding_model).toBe('text-embedding-ada-002')
|
||||
expect(params!.embedding_model_provider).toBe('openai')
|
||||
expect(params!.process_rule.mode).toBe('custom')
|
||||
})
|
||||
|
||||
it('should validate params: overlap must not exceed maxChunkLength', () => {
|
||||
const { result } = renderHook(() =>
|
||||
useDocumentCreation({
|
||||
dataSourceType: DataSourceType.FILE,
|
||||
files: [createMockFile()],
|
||||
notionPages: [],
|
||||
notionCredentialId: '',
|
||||
websitePages: [],
|
||||
}),
|
||||
)
|
||||
|
||||
// validateParams returns false (invalid) when overlap > maxChunkLength for general mode
|
||||
const isValid = result.current.validateParams({
|
||||
segmentationType: 'general',
|
||||
maxChunkLength: 100,
|
||||
limitMaxChunkLength: 4000,
|
||||
overlap: 200, // overlap > maxChunkLength
|
||||
indexType: IndexingType.QUALIFIED,
|
||||
embeddingModel: { provider: 'openai', model: 'text-embedding-ada-002' },
|
||||
rerankModelList: [],
|
||||
retrievalConfig: {
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
},
|
||||
})
|
||||
expect(isValid).toBe(false)
|
||||
})
|
||||
|
||||
it('should validate params: maxChunkLength must not exceed limit', () => {
|
||||
const { result } = renderHook(() =>
|
||||
useDocumentCreation({
|
||||
dataSourceType: DataSourceType.FILE,
|
||||
files: [createMockFile()],
|
||||
notionPages: [],
|
||||
notionCredentialId: '',
|
||||
websitePages: [],
|
||||
}),
|
||||
)
|
||||
|
||||
const isValid = result.current.validateParams({
|
||||
segmentationType: 'general',
|
||||
maxChunkLength: 5000,
|
||||
limitMaxChunkLength: 4000, // limit < maxChunkLength
|
||||
overlap: 50,
|
||||
indexType: IndexingType.QUALIFIED,
|
||||
embeddingModel: { provider: 'openai', model: 'text-embedding-ada-002' },
|
||||
rerankModelList: [],
|
||||
retrievalConfig: {
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
},
|
||||
})
|
||||
expect(isValid).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Full Flow: Segmentation State → Process Rule → Creation Params Consistency', () => {
|
||||
it('should keep segmentation values consistent across getProcessRule and buildCreationParams', () => {
|
||||
const files = [createMockFile()]
|
||||
const { result: segResult } = renderHook(() => useSegmentationState())
|
||||
const { result: creationResult } = renderHook(() =>
|
||||
useDocumentCreation({
|
||||
dataSourceType: DataSourceType.FILE,
|
||||
files,
|
||||
notionPages: [],
|
||||
notionCredentialId: '',
|
||||
websitePages: [],
|
||||
}),
|
||||
)
|
||||
|
||||
// Change segmentation settings
|
||||
act(() => {
|
||||
segResult.current.setMaxChunkLength(2048)
|
||||
segResult.current.setOverlap(100)
|
||||
})
|
||||
|
||||
const processRule = segResult.current.getProcessRule(ChunkingMode.text)
|
||||
expect(processRule.rules.segmentation.max_tokens).toBe(2048)
|
||||
expect(processRule.rules.segmentation.chunk_overlap).toBe(100)
|
||||
|
||||
const params = creationResult.current.buildCreationParams(
|
||||
ChunkingMode.text,
|
||||
'Chinese',
|
||||
processRule,
|
||||
{
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
},
|
||||
{ provider: 'openai', model: 'text-embedding-ada-002' },
|
||||
IndexingType.QUALIFIED,
|
||||
)
|
||||
|
||||
expect(params).not.toBeNull()
|
||||
expect(params!.process_rule.rules.segmentation.max_tokens).toBe(2048)
|
||||
expect(params!.process_rule.rules.segmentation.chunk_overlap).toBe(100)
|
||||
expect(params!.doc_language).toBe('Chinese')
|
||||
})
|
||||
|
||||
it('should support parent-child mode through the full pipeline', () => {
|
||||
const files = [createMockFile()]
|
||||
const { result: segResult } = renderHook(() => useSegmentationState())
|
||||
const { result: creationResult } = renderHook(() =>
|
||||
useDocumentCreation({
|
||||
dataSourceType: DataSourceType.FILE,
|
||||
files,
|
||||
notionPages: [],
|
||||
notionCredentialId: '',
|
||||
websitePages: [],
|
||||
}),
|
||||
)
|
||||
|
||||
const processRule = segResult.current.getProcessRule(ChunkingMode.parentChild)
|
||||
const params = creationResult.current.buildCreationParams(
|
||||
ChunkingMode.parentChild,
|
||||
'English',
|
||||
processRule,
|
||||
{
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
},
|
||||
{ provider: 'openai', model: 'text-embedding-ada-002' },
|
||||
IndexingType.QUALIFIED,
|
||||
)
|
||||
|
||||
expect(params).not.toBeNull()
|
||||
expect(params!.doc_form).toBe(ChunkingMode.parentChild)
|
||||
expect(params!.process_rule.mode).toBe('hierarchical')
|
||||
expect(params!.process_rule.rules.parent_mode).toBe('paragraph')
|
||||
expect(params!.process_rule.rules.subchunk_segmentation).toBeDefined()
|
||||
})
|
||||
})
|
||||
})
|
||||
web/__tests__/datasets/dataset-settings-flow.test.tsx (new file, 451 lines)
@@ -0,0 +1,451 @@
/**
 * Integration Test: Dataset Settings Flow
 *
 * Tests cross-module data contracts in the dataset settings form:
 * useFormState hook ↔ index method config ↔ retrieval config ↔ permission state.
 *
 * The unit-level use-form-state.spec.ts validates the hook in isolation.
 * This integration test verifies that changing one configuration dimension
 * correctly cascades to dependent parts (index method → retrieval config,
 * permission → member list visibility, embedding model → embedding available state).
 */

import type { DataSet } from '@/models/datasets'
import type { RetrievalConfig } from '@/types/app'
import { act, renderHook, waitFor } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { IndexingType } from '@/app/components/datasets/create/step-two'
import { ChunkingMode, DatasetPermission, DataSourceType, WeightedScoreEnum } from '@/models/datasets'
import { RETRIEVE_METHOD } from '@/types/app'

// --- Mocks ---
|
||||
|
||||
const mockMutateDatasets = vi.fn()
|
||||
const mockInvalidDatasetList = vi.fn()
|
||||
const mockUpdateDatasetSetting = vi.fn().mockResolvedValue({})
|
||||
|
||||
vi.mock('@/context/app-context', () => ({
|
||||
useSelector: () => false,
|
||||
}))
|
||||
|
||||
vi.mock('@/service/datasets', () => ({
|
||||
updateDatasetSetting: (...args: unknown[]) => mockUpdateDatasetSetting(...args),
|
||||
}))
|
||||
|
||||
vi.mock('@/service/knowledge/use-dataset', () => ({
|
||||
useInvalidDatasetList: () => mockInvalidDatasetList,
|
||||
}))
|
||||
|
||||
vi.mock('@/service/use-common', () => ({
|
||||
useMembers: () => ({
|
||||
data: {
|
||||
accounts: [
|
||||
{ id: 'user-1', name: 'Alice', email: 'alice@example.com', role: 'owner', avatar: '', avatar_url: '', last_login_at: '', created_at: '', status: 'active' },
|
||||
{ id: 'user-2', name: 'Bob', email: 'bob@example.com', role: 'admin', avatar: '', avatar_url: '', last_login_at: '', created_at: '', status: 'active' },
|
||||
{ id: 'user-3', name: 'Charlie', email: 'charlie@example.com', role: 'normal', avatar: '', avatar_url: '', last_login_at: '', created_at: '', status: 'active' },
|
||||
],
|
||||
},
|
||||
}),
|
||||
}))
|
||||
|
||||
vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({
|
||||
useModelList: () => ({ data: [] }),
|
||||
}))
|
||||
|
||||
vi.mock('@/app/components/datasets/common/check-rerank-model', () => ({
|
||||
isReRankModelSelected: () => true,
|
||||
}))
|
||||
|
||||
vi.mock('@/app/components/base/toast', () => ({
|
||||
default: { notify: vi.fn() },
|
||||
}))
|
||||
|
||||
// --- Dataset factory ---
|
||||
|
||||
const createMockDataset = (overrides?: Partial<DataSet>): DataSet => ({
|
||||
id: 'ds-settings-1',
|
||||
name: 'Settings Test Dataset',
|
||||
description: 'Integration test dataset',
|
||||
permission: DatasetPermission.onlyMe,
|
||||
icon_info: {
|
||||
icon_type: 'emoji',
|
||||
icon: '📙',
|
||||
icon_background: '#FFF4ED',
|
||||
icon_url: '',
|
||||
},
|
||||
indexing_technique: 'high_quality',
|
||||
indexing_status: 'completed',
|
||||
data_source_type: DataSourceType.FILE,
|
||||
doc_form: ChunkingMode.text,
|
||||
embedding_model: 'text-embedding-ada-002',
|
||||
embedding_model_provider: 'openai',
|
||||
embedding_available: true,
|
||||
app_count: 2,
|
||||
document_count: 10,
|
||||
total_document_count: 10,
|
||||
word_count: 5000,
|
||||
provider: 'vendor',
|
||||
tags: [],
|
||||
partial_member_list: [],
|
||||
external_knowledge_info: {
|
||||
external_knowledge_id: '',
|
||||
external_knowledge_api_id: '',
|
||||
external_knowledge_api_name: '',
|
||||
external_knowledge_api_endpoint: '',
|
||||
},
|
||||
external_retrieval_model: {
|
||||
top_k: 2,
|
||||
score_threshold: 0.5,
|
||||
score_threshold_enabled: false,
|
||||
},
|
||||
retrieval_model_dict: {
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
} as RetrievalConfig,
|
||||
retrieval_model: {
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
} as RetrievalConfig,
|
||||
built_in_field_enabled: false,
|
||||
keyword_number: 10,
|
||||
created_by: 'user-1',
|
||||
updated_by: 'user-1',
|
||||
updated_at: Date.now(),
|
||||
runtime_mode: 'general',
|
||||
enable_api: true,
|
||||
is_multimodal: false,
|
||||
...overrides,
|
||||
} as DataSet)
|
||||
|
||||
let mockDataset: DataSet = createMockDataset()
|
||||
|
||||
vi.mock('@/context/dataset-detail', () => ({
|
||||
useDatasetDetailContextWithSelector: (
|
||||
selector: (state: { dataset: DataSet | null, mutateDatasetRes: () => void }) => unknown,
|
||||
) => selector({ dataset: mockDataset, mutateDatasetRes: mockMutateDatasets }),
|
||||
}))
|
||||
|
||||
// Import after mocks are registered
|
||||
const { useFormState } = await import(
|
||||
'@/app/components/datasets/settings/form/hooks/use-form-state',
|
||||
)
|
||||
|
||||
describe('Dataset Settings Flow - Cross-Module Configuration Cascade', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
mockUpdateDatasetSetting.mockResolvedValue({})
|
||||
mockDataset = createMockDataset()
|
||||
})
|
||||
|
||||
describe('Form State Initialization from Dataset → Index Method → Retrieval Config Chain', () => {
|
||||
it('should initialise all form dimensions from a QUALIFIED dataset', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
expect(result.current.name).toBe('Settings Test Dataset')
|
||||
expect(result.current.description).toBe('Integration test dataset')
|
||||
expect(result.current.indexMethod).toBe('high_quality')
|
||||
expect(result.current.embeddingModel).toEqual({
|
||||
provider: 'openai',
|
||||
model: 'text-embedding-ada-002',
|
||||
})
|
||||
expect(result.current.retrievalConfig.search_method).toBe(RETRIEVE_METHOD.semantic)
|
||||
})
|
||||
|
||||
it('should initialise from an ECONOMICAL dataset with keyword retrieval', () => {
|
||||
mockDataset = createMockDataset({
|
||||
indexing_technique: IndexingType.ECONOMICAL,
|
||||
embedding_model: '',
|
||||
embedding_model_provider: '',
|
||||
retrieval_model_dict: {
|
||||
search_method: RETRIEVE_METHOD.keywordSearch,
|
||||
reranking_enable: false,
|
||||
reranking_model: { reranking_provider_name: '', reranking_model_name: '' },
|
||||
top_k: 5,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0,
|
||||
} as RetrievalConfig,
|
||||
})
|
||||
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
expect(result.current.indexMethod).toBe(IndexingType.ECONOMICAL)
|
||||
expect(result.current.embeddingModel).toEqual({ provider: '', model: '' })
|
||||
expect(result.current.retrievalConfig.search_method).toBe(RETRIEVE_METHOD.keywordSearch)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Index Method Change → Retrieval Config Sync', () => {
|
||||
it('should allow switching index method from QUALIFIED to ECONOMICAL', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
expect(result.current.indexMethod).toBe('high_quality')
|
||||
|
||||
act(() => {
|
||||
result.current.setIndexMethod(IndexingType.ECONOMICAL)
|
||||
})
|
||||
|
||||
expect(result.current.indexMethod).toBe(IndexingType.ECONOMICAL)
|
||||
})
|
||||
|
||||
it('should allow updating retrieval config after index method switch', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setIndexMethod(IndexingType.ECONOMICAL)
|
||||
})
|
||||
|
||||
act(() => {
|
||||
result.current.setRetrievalConfig({
|
||||
...result.current.retrievalConfig,
|
||||
search_method: RETRIEVE_METHOD.keywordSearch,
|
||||
reranking_enable: false,
|
||||
})
|
||||
})
|
||||
|
||||
expect(result.current.indexMethod).toBe(IndexingType.ECONOMICAL)
|
||||
expect(result.current.retrievalConfig.search_method).toBe(RETRIEVE_METHOD.keywordSearch)
|
||||
expect(result.current.retrievalConfig.reranking_enable).toBe(false)
|
||||
})
|
||||
|
||||
it('should preserve retrieval config when switching back to QUALIFIED', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
const originalConfig = { ...result.current.retrievalConfig }
|
||||
|
||||
act(() => {
|
||||
result.current.setIndexMethod(IndexingType.ECONOMICAL)
|
||||
})
|
||||
act(() => {
|
||||
result.current.setIndexMethod(IndexingType.QUALIFIED)
|
||||
})
|
||||
|
||||
expect(result.current.indexMethod).toBe('high_quality')
|
||||
expect(result.current.retrievalConfig.search_method).toBe(originalConfig.search_method)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Permission Change → Member List Visibility Logic', () => {
|
||||
it('should start with onlyMe permission and empty member selection', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
expect(result.current.permission).toBe(DatasetPermission.onlyMe)
|
||||
expect(result.current.selectedMemberIDs).toEqual([])
|
||||
})
|
||||
|
||||
it('should enable member selection when switching to partialMembers', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setPermission(DatasetPermission.partialMembers)
|
||||
})
|
||||
|
||||
expect(result.current.permission).toBe(DatasetPermission.partialMembers)
|
||||
expect(result.current.memberList).toHaveLength(3)
|
||||
expect(result.current.memberList.map(m => m.id)).toEqual(['user-1', 'user-2', 'user-3'])
|
||||
})
|
||||
|
||||
it('should persist member selection through permission toggle', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setPermission(DatasetPermission.partialMembers)
|
||||
result.current.setSelectedMemberIDs(['user-1', 'user-3'])
|
||||
})
|
||||
|
||||
act(() => {
|
||||
result.current.setPermission(DatasetPermission.allTeamMembers)
|
||||
})
|
||||
|
||||
act(() => {
|
||||
result.current.setPermission(DatasetPermission.partialMembers)
|
||||
})
|
||||
|
||||
expect(result.current.selectedMemberIDs).toEqual(['user-1', 'user-3'])
|
||||
})
|
||||
|
||||
it('should include partial_member_list in save payload only for partialMembers', async () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setPermission(DatasetPermission.partialMembers)
|
||||
result.current.setSelectedMemberIDs(['user-2'])
|
||||
})
|
||||
|
||||
await act(async () => {
|
||||
await result.current.handleSave()
|
||||
})
|
||||
|
||||
expect(mockUpdateDatasetSetting).toHaveBeenCalledWith({
|
||||
datasetId: 'ds-settings-1',
|
||||
body: expect.objectContaining({
|
||||
permission: DatasetPermission.partialMembers,
|
||||
partial_member_list: [
|
||||
expect.objectContaining({ user_id: 'user-2', role: 'admin' }),
|
||||
],
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
it('should not include partial_member_list for allTeamMembers permission', async () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setPermission(DatasetPermission.allTeamMembers)
|
||||
})
|
||||
|
||||
await act(async () => {
|
||||
await result.current.handleSave()
|
||||
})
|
||||
|
||||
const savedBody = mockUpdateDatasetSetting.mock.calls[0][0].body as Record<string, unknown>
|
||||
expect(savedBody).not.toHaveProperty('partial_member_list')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Form Submission Validation → All Fields Together', () => {
|
||||
it('should reject empty name on save', async () => {
|
||||
const Toast = await import('@/app/components/base/toast')
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setName('')
|
||||
})
|
||||
|
||||
await act(async () => {
|
||||
await result.current.handleSave()
|
||||
})
|
||||
|
||||
expect(Toast.default.notify).toHaveBeenCalledWith({
|
||||
type: 'error',
|
||||
message: expect.any(String),
|
||||
})
|
||||
expect(mockUpdateDatasetSetting).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should include all configuration dimensions in a successful save', async () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setName('Updated Name')
|
||||
result.current.setDescription('Updated Description')
|
||||
result.current.setIndexMethod(IndexingType.ECONOMICAL)
|
||||
result.current.setKeywordNumber(15)
|
||||
})
|
||||
|
||||
await act(async () => {
|
||||
await result.current.handleSave()
|
||||
})
|
||||
|
||||
expect(mockUpdateDatasetSetting).toHaveBeenCalledWith({
|
||||
datasetId: 'ds-settings-1',
|
||||
body: expect.objectContaining({
|
||||
name: 'Updated Name',
|
||||
description: 'Updated Description',
|
||||
indexing_technique: 'economy',
|
||||
keyword_number: 15,
|
||||
embedding_model: 'text-embedding-ada-002',
|
||||
embedding_model_provider: 'openai',
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
it('should call mutateDatasets and invalidDatasetList after successful save', async () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
await act(async () => {
|
||||
await result.current.handleSave()
|
||||
})
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockMutateDatasets).toHaveBeenCalled()
|
||||
expect(mockInvalidDatasetList).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('Embedding Model Change → Retrieval Config Cascade', () => {
|
||||
it('should update embedding model independently of retrieval config', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
const originalRetrievalConfig = { ...result.current.retrievalConfig }
|
||||
|
||||
act(() => {
|
||||
result.current.setEmbeddingModel({ provider: 'cohere', model: 'embed-english-v3.0' })
|
||||
})
|
||||
|
||||
expect(result.current.embeddingModel).toEqual({
|
||||
provider: 'cohere',
|
||||
model: 'embed-english-v3.0',
|
||||
})
|
||||
expect(result.current.retrievalConfig.search_method).toBe(originalRetrievalConfig.search_method)
|
||||
})
|
||||
|
||||
it('should propagate embedding model into weighted retrieval config on save', async () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setEmbeddingModel({ provider: 'cohere', model: 'embed-v3' })
|
||||
result.current.setRetrievalConfig({
|
||||
...result.current.retrievalConfig,
|
||||
search_method: RETRIEVE_METHOD.hybrid,
|
||||
weights: {
|
||||
weight_type: WeightedScoreEnum.Customized,
|
||||
vector_setting: {
|
||||
vector_weight: 0.6,
|
||||
embedding_provider_name: '',
|
||||
embedding_model_name: '',
|
||||
},
|
||||
keyword_setting: { keyword_weight: 0.4 },
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
await act(async () => {
|
||||
await result.current.handleSave()
|
||||
})
|
||||
|
||||
expect(mockUpdateDatasetSetting).toHaveBeenCalledWith({
|
||||
datasetId: 'ds-settings-1',
|
||||
body: expect.objectContaining({
|
||||
embedding_model: 'embed-v3',
|
||||
embedding_model_provider: 'cohere',
|
||||
retrieval_model: expect.objectContaining({
|
||||
weights: expect.objectContaining({
|
||||
vector_setting: expect.objectContaining({
|
||||
embedding_provider_name: 'cohere',
|
||||
embedding_model_name: 'embed-v3',
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle switching from semantic to hybrid search with embedding model', () => {
|
||||
const { result } = renderHook(() => useFormState())
|
||||
|
||||
act(() => {
|
||||
result.current.setRetrievalConfig({
|
||||
...result.current.retrievalConfig,
|
||||
search_method: RETRIEVE_METHOD.hybrid,
|
||||
reranking_enable: true,
|
||||
reranking_model: {
|
||||
reranking_provider_name: 'cohere',
|
||||
reranking_model_name: 'rerank-english-v3.0',
|
||||
},
|
||||
})
|
||||
})
|
||||
|
||||
expect(result.current.retrievalConfig.search_method).toBe(RETRIEVE_METHOD.hybrid)
|
||||
expect(result.current.retrievalConfig.reranking_enable).toBe(true)
|
||||
expect(result.current.embeddingModel.model).toBe('text-embedding-ada-002')
|
||||
})
|
||||
})
|
||||
})
|
||||
web/__tests__/datasets/document-management.test.tsx (new file, 335 lines)
@@ -0,0 +1,335 @@
/**
 * Integration Test: Document Management Flow
 *
 * Tests cross-module interactions: query state (URL-based) → document list sorting →
 * document selection → status filter utilities.
 * Validates the data contract between documents page hooks and list component hooks.
 */

import type { SimpleDocumentDetail } from '@/models/datasets'
import { act, renderHook } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { DataSourceType } from '@/models/datasets'

const mockPush = vi.fn()
|
||||
vi.mock('next/navigation', () => ({
|
||||
useSearchParams: () => new URLSearchParams(''),
|
||||
useRouter: () => ({ push: mockPush }),
|
||||
usePathname: () => '/datasets/ds-1/documents',
|
||||
}))
|
||||
|
||||
const { sanitizeStatusValue, normalizeStatusForQuery } = await import(
|
||||
'@/app/components/datasets/documents/status-filter',
|
||||
)
|
||||
|
||||
const { useDocumentSort } = await import(
|
||||
'@/app/components/datasets/documents/components/document-list/hooks/use-document-sort',
|
||||
)
|
||||
const { useDocumentSelection } = await import(
|
||||
'@/app/components/datasets/documents/components/document-list/hooks/use-document-selection',
|
||||
)
|
||||
const { default: useDocumentListQueryState } = await import(
|
||||
'@/app/components/datasets/documents/hooks/use-document-list-query-state',
|
||||
)
|
||||
|
||||
type LocalDoc = SimpleDocumentDetail & { percent?: number }
|
||||
|
||||
const createDoc = (overrides?: Partial<LocalDoc>): LocalDoc => ({
|
||||
id: `doc-${Math.random().toString(36).slice(2, 8)}`,
|
||||
name: 'test-doc.txt',
|
||||
word_count: 500,
|
||||
hit_count: 10,
|
||||
created_at: Date.now() / 1000,
|
||||
data_source_type: DataSourceType.FILE,
|
||||
display_status: 'available',
|
||||
indexing_status: 'completed',
|
||||
enabled: true,
|
||||
archived: false,
|
||||
doc_type: null,
|
||||
doc_metadata: null,
|
||||
position: 1,
|
||||
dataset_process_rule_id: 'rule-1',
|
||||
...overrides,
|
||||
} as LocalDoc)
|
||||
|
||||
describe('Document Management Flow', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe('Status Filter Utilities', () => {
|
||||
it('should sanitize valid status values', () => {
|
||||
expect(sanitizeStatusValue('all')).toBe('all')
|
||||
expect(sanitizeStatusValue('available')).toBe('available')
|
||||
expect(sanitizeStatusValue('error')).toBe('error')
|
||||
})
|
||||
|
||||
it('should fallback to "all" for invalid values', () => {
|
||||
expect(sanitizeStatusValue(null)).toBe('all')
|
||||
expect(sanitizeStatusValue(undefined)).toBe('all')
|
||||
expect(sanitizeStatusValue('')).toBe('all')
|
||||
expect(sanitizeStatusValue('nonexistent')).toBe('all')
|
||||
})
|
||||
|
||||
it('should handle URL aliases', () => {
|
||||
// 'active' is aliased to 'available'
|
||||
expect(sanitizeStatusValue('active')).toBe('available')
|
||||
})
|
||||
|
||||
it('should normalize status for API query', () => {
|
||||
expect(normalizeStatusForQuery('all')).toBe('all')
|
||||
// 'enabled' normalized to 'available' for query
|
||||
expect(normalizeStatusForQuery('enabled')).toBe('available')
|
||||
})
|
||||
})
|
||||
|
||||
describe('URL-based Query State', () => {
|
||||
it('should parse default query from empty URL params', () => {
|
||||
const { result } = renderHook(() => useDocumentListQueryState())
|
||||
|
||||
expect(result.current.query).toEqual({
|
||||
page: 1,
|
||||
limit: 10,
|
||||
keyword: '',
|
||||
status: 'all',
|
||||
sort: '-created_at',
|
||||
})
|
||||
})
|
||||
|
||||
it('should update query and push to router', () => {
|
||||
const { result } = renderHook(() => useDocumentListQueryState())
|
||||
|
||||
act(() => {
|
||||
result.current.updateQuery({ keyword: 'test', page: 2 })
|
||||
})
|
||||
|
||||
expect(mockPush).toHaveBeenCalled()
|
||||
// The push call should contain the updated query params
|
||||
const pushUrl = mockPush.mock.calls[0][0] as string
|
||||
expect(pushUrl).toContain('keyword=test')
|
||||
expect(pushUrl).toContain('page=2')
|
||||
})
|
||||
|
||||
it('should reset query to defaults', () => {
|
||||
const { result } = renderHook(() => useDocumentListQueryState())
|
||||
|
||||
act(() => {
|
||||
result.current.resetQuery()
|
||||
})
|
||||
|
||||
expect(mockPush).toHaveBeenCalled()
|
||||
// Default query omits default values from URL
|
||||
const pushUrl = mockPush.mock.calls[0][0] as string
|
||||
expect(pushUrl).toBe('/datasets/ds-1/documents')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Document Sort Integration', () => {
|
||||
it('should return documents unsorted when no sort field set', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1', name: 'Banana.txt', word_count: 300 }),
|
||||
createDoc({ id: 'doc-2', name: 'Apple.txt', word_count: 100 }),
|
||||
createDoc({ id: 'doc-3', name: 'Cherry.txt', word_count: 200 }),
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSort({
|
||||
documents: docs,
|
||||
statusFilterValue: '',
|
||||
remoteSortValue: '-created_at',
|
||||
}))
|
||||
|
||||
expect(result.current.sortField).toBeNull()
|
||||
expect(result.current.sortedDocuments).toHaveLength(3)
|
||||
})
|
||||
|
||||
it('should sort by name descending', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1', name: 'Banana.txt' }),
|
||||
createDoc({ id: 'doc-2', name: 'Apple.txt' }),
|
||||
createDoc({ id: 'doc-3', name: 'Cherry.txt' }),
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSort({
|
||||
documents: docs,
|
||||
statusFilterValue: '',
|
||||
remoteSortValue: '-created_at',
|
||||
}))
|
||||
|
||||
act(() => {
|
||||
result.current.handleSort('name')
|
||||
})
|
||||
|
||||
expect(result.current.sortField).toBe('name')
|
||||
expect(result.current.sortOrder).toBe('desc')
|
||||
const names = result.current.sortedDocuments.map(d => d.name)
|
||||
expect(names).toEqual(['Cherry.txt', 'Banana.txt', 'Apple.txt'])
|
||||
})
|
||||
|
||||
it('should toggle sort order on same field click', () => {
|
||||
const docs = [createDoc({ id: 'doc-1', name: 'A.txt' }), createDoc({ id: 'doc-2', name: 'B.txt' })]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSort({
|
||||
documents: docs,
|
||||
statusFilterValue: '',
|
||||
remoteSortValue: '-created_at',
|
||||
}))
|
||||
|
||||
act(() => result.current.handleSort('name'))
|
||||
expect(result.current.sortOrder).toBe('desc')
|
||||
|
||||
act(() => result.current.handleSort('name'))
|
||||
expect(result.current.sortOrder).toBe('asc')
|
||||
})
|
||||
|
||||
it('should filter by status before sorting', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1', name: 'A.txt', display_status: 'available' }),
|
||||
createDoc({ id: 'doc-2', name: 'B.txt', display_status: 'error' }),
|
||||
createDoc({ id: 'doc-3', name: 'C.txt', display_status: 'available' }),
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSort({
|
||||
documents: docs,
|
||||
statusFilterValue: 'available',
|
||||
remoteSortValue: '-created_at',
|
||||
}))
|
||||
|
||||
// Only 'available' documents should remain
|
||||
expect(result.current.sortedDocuments).toHaveLength(2)
|
||||
expect(result.current.sortedDocuments.every(d => d.display_status === 'available')).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Document Selection Integration', () => {
|
||||
it('should manage selection state externally', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1' }),
|
||||
createDoc({ id: 'doc-2' }),
|
||||
createDoc({ id: 'doc-3' }),
|
||||
]
|
||||
const onSelectedIdChange = vi.fn()
|
||||
|
||||
const { result } = renderHook(() => useDocumentSelection({
|
||||
documents: docs,
|
||||
selectedIds: [],
|
||||
onSelectedIdChange,
|
||||
}))
|
||||
|
||||
expect(result.current.isAllSelected).toBe(false)
|
||||
expect(result.current.isSomeSelected).toBe(false)
|
||||
})
|
||||
|
||||
it('should select all documents', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1' }),
|
||||
createDoc({ id: 'doc-2' }),
|
||||
]
|
||||
const onSelectedIdChange = vi.fn()
|
||||
|
||||
const { result } = renderHook(() => useDocumentSelection({
|
||||
documents: docs,
|
||||
selectedIds: [],
|
||||
onSelectedIdChange,
|
||||
}))
|
||||
|
||||
act(() => {
|
||||
result.current.onSelectAll()
|
||||
})
|
||||
|
||||
expect(onSelectedIdChange).toHaveBeenCalledWith(
|
||||
expect.arrayContaining(['doc-1', 'doc-2']),
|
||||
)
|
||||
})
|
||||
|
||||
it('should detect all-selected state', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1' }),
|
||||
createDoc({ id: 'doc-2' }),
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSelection({
|
||||
documents: docs,
|
||||
selectedIds: ['doc-1', 'doc-2'],
|
||||
onSelectedIdChange: vi.fn(),
|
||||
}))
|
||||
|
||||
expect(result.current.isAllSelected).toBe(true)
|
||||
})
|
||||
|
||||
it('should detect partial selection', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1' }),
|
||||
createDoc({ id: 'doc-2' }),
|
||||
createDoc({ id: 'doc-3' }),
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSelection({
|
||||
documents: docs,
|
||||
selectedIds: ['doc-1'],
|
||||
onSelectedIdChange: vi.fn(),
|
||||
}))
|
||||
|
||||
expect(result.current.isSomeSelected).toBe(true)
|
||||
expect(result.current.isAllSelected).toBe(false)
|
||||
})
|
||||
|
||||
it('should identify downloadable selected documents (FILE type only)', () => {
|
||||
const docs = [
|
||||
createDoc({ id: 'doc-1', data_source_type: DataSourceType.FILE }),
|
||||
createDoc({ id: 'doc-2', data_source_type: DataSourceType.NOTION }),
|
||||
]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSelection({
|
||||
documents: docs,
|
||||
selectedIds: ['doc-1', 'doc-2'],
|
||||
onSelectedIdChange: vi.fn(),
|
||||
}))
|
||||
|
||||
expect(result.current.downloadableSelectedIds).toEqual(['doc-1'])
|
||||
})
|
||||
|
||||
it('should clear selection', () => {
|
||||
const onSelectedIdChange = vi.fn()
|
||||
const docs = [createDoc({ id: 'doc-1' })]
|
||||
|
||||
const { result } = renderHook(() => useDocumentSelection({
|
||||
documents: docs,
|
||||
selectedIds: ['doc-1'],
|
||||
onSelectedIdChange,
|
||||
}))
|
||||
|
||||
act(() => {
|
||||
result.current.clearSelection()
|
||||
})
|
||||
|
||||
expect(onSelectedIdChange).toHaveBeenCalledWith([])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Cross-Module: Query State → Sort → Selection Pipeline', () => {
|
||||
it('should maintain consistent default state across all hooks', () => {
|
||||
const docs = [createDoc({ id: 'doc-1' })]
|
||||
const { result: queryResult } = renderHook(() => useDocumentListQueryState())
|
||||
const { result: sortResult } = renderHook(() => useDocumentSort({
|
||||
documents: docs,
|
||||
statusFilterValue: queryResult.current.query.status,
|
||||
remoteSortValue: queryResult.current.query.sort,
|
||||
}))
|
||||
const { result: selResult } = renderHook(() => useDocumentSelection({
|
||||
documents: sortResult.current.sortedDocuments,
|
||||
selectedIds: [],
|
||||
onSelectedIdChange: vi.fn(),
|
||||
}))
|
||||
|
||||
// Query defaults
|
||||
expect(queryResult.current.query.sort).toBe('-created_at')
|
||||
expect(queryResult.current.query.status).toBe('all')
|
||||
|
||||
// Sort inherits 'all' status → no filtering applied
|
||||
expect(sortResult.current.sortedDocuments).toHaveLength(1)
|
||||
|
||||
// Selection starts empty
|
||||
expect(selResult.current.isAllSelected).toBe(false)
|
||||
})
|
||||
})
|
||||
})
|
||||
web/__tests__/datasets/external-knowledge-base.test.tsx (new file, 215 lines)
@@ -0,0 +1,215 @@
/**
 * Integration Test: External Knowledge Base Creation Flow
 *
 * Tests the data contract, validation logic, and API interaction
 * for external knowledge base creation.
 */

import type { CreateKnowledgeBaseReq } from '@/app/components/datasets/external-knowledge-base/create/declarations'
import { describe, expect, it } from 'vitest'

// --- Factory ---
|
||||
const createFormData = (overrides?: Partial<CreateKnowledgeBaseReq>): CreateKnowledgeBaseReq => ({
|
||||
name: 'My External KB',
|
||||
description: 'A test external knowledge base',
|
||||
external_knowledge_api_id: 'api-1',
|
||||
external_knowledge_id: 'ext-kb-123',
|
||||
external_retrieval_model: {
|
||||
top_k: 4,
|
||||
score_threshold: 0.5,
|
||||
score_threshold_enabled: false,
|
||||
},
|
||||
provider: 'external',
|
||||
...overrides,
|
||||
})
|
||||
|
||||
describe('External Knowledge Base Creation Flow', () => {
|
||||
describe('Data Contract: CreateKnowledgeBaseReq', () => {
|
||||
it('should define a complete form structure', () => {
|
||||
const form = createFormData()
|
||||
|
||||
expect(form).toHaveProperty('name')
|
||||
expect(form).toHaveProperty('external_knowledge_api_id')
|
||||
expect(form).toHaveProperty('external_knowledge_id')
|
||||
expect(form).toHaveProperty('external_retrieval_model')
|
||||
expect(form).toHaveProperty('provider')
|
||||
expect(form.provider).toBe('external')
|
||||
})
|
||||
|
||||
it('should include retrieval model settings', () => {
|
||||
const form = createFormData()
|
||||
|
||||
expect(form.external_retrieval_model).toEqual({
|
||||
top_k: 4,
|
||||
score_threshold: 0.5,
|
||||
score_threshold_enabled: false,
|
||||
})
|
||||
})
|
||||
|
||||
it('should allow partial overrides', () => {
|
||||
const form = createFormData({
|
||||
name: 'Custom Name',
|
||||
external_retrieval_model: {
|
||||
top_k: 10,
|
||||
score_threshold: 0.8,
|
||||
score_threshold_enabled: true,
|
||||
},
|
||||
})
|
||||
|
||||
expect(form.name).toBe('Custom Name')
|
||||
expect(form.external_retrieval_model.top_k).toBe(10)
|
||||
expect(form.external_retrieval_model.score_threshold_enabled).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Form Validation Logic', () => {
|
||||
const isFormValid = (form: CreateKnowledgeBaseReq): boolean => {
|
||||
return (
|
||||
form.name.trim() !== ''
|
||||
&& form.external_knowledge_api_id !== ''
|
||||
&& form.external_knowledge_id !== ''
|
||||
&& form.external_retrieval_model.top_k !== undefined
|
||||
&& form.external_retrieval_model.score_threshold !== undefined
|
||||
)
|
||||
}
|
||||
|
||||
it('should validate a complete form', () => {
|
||||
const form = createFormData()
|
||||
expect(isFormValid(form)).toBe(true)
|
||||
})
|
||||
|
||||
it('should reject empty name', () => {
|
||||
const form = createFormData({ name: '' })
|
||||
expect(isFormValid(form)).toBe(false)
|
||||
})
|
||||
|
||||
it('should reject whitespace-only name', () => {
|
||||
const form = createFormData({ name: ' ' })
|
||||
expect(isFormValid(form)).toBe(false)
|
||||
})
|
||||
|
||||
it('should reject empty external_knowledge_api_id', () => {
|
||||
const form = createFormData({ external_knowledge_api_id: '' })
|
||||
expect(isFormValid(form)).toBe(false)
|
||||
})
|
||||
|
||||
it('should reject empty external_knowledge_id', () => {
|
||||
const form = createFormData({ external_knowledge_id: '' })
|
||||
expect(isFormValid(form)).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Form State Transitions', () => {
|
||||
it('should start with empty default state', () => {
|
||||
const defaultForm: CreateKnowledgeBaseReq = {
|
||||
name: '',
|
||||
description: '',
|
||||
external_knowledge_api_id: '',
|
||||
external_knowledge_id: '',
|
||||
external_retrieval_model: {
|
||||
top_k: 4,
|
||||
score_threshold: 0.5,
|
||||
score_threshold_enabled: false,
|
||||
},
|
||||
provider: 'external',
|
||||
}
|
||||
|
||||
// Verify default state matches component's initial useState
|
||||
expect(defaultForm.name).toBe('')
|
||||
expect(defaultForm.external_knowledge_api_id).toBe('')
|
||||
expect(defaultForm.external_knowledge_id).toBe('')
|
||||
expect(defaultForm.provider).toBe('external')
|
||||
})
|
||||
|
||||
it('should support immutable form updates', () => {
|
||||
const form = createFormData({ name: '' })
|
||||
const updated = { ...form, name: 'Updated Name' }
|
||||
|
||||
expect(form.name).toBe('')
|
||||
expect(updated.name).toBe('Updated Name')
|
||||
// Other fields should remain unchanged
|
||||
expect(updated.external_knowledge_api_id).toBe(form.external_knowledge_api_id)
|
||||
})
|
||||
|
||||
it('should support retrieval model updates', () => {
|
||||
const form = createFormData()
|
||||
const updated = {
|
||||
...form,
|
||||
external_retrieval_model: {
|
||||
...form.external_retrieval_model,
|
||||
top_k: 10,
|
||||
score_threshold_enabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
expect(updated.external_retrieval_model.top_k).toBe(10)
|
||||
expect(updated.external_retrieval_model.score_threshold_enabled).toBe(true)
|
||||
// Unchanged field
|
||||
expect(updated.external_retrieval_model.score_threshold).toBe(0.5)
|
||||
})
|
||||
})
|
||||
|
||||
describe('API Call Data Contract', () => {
|
||||
it('should produce a valid API payload from form data', () => {
|
||||
const form = createFormData()
|
||||
|
||||
// The API expects the full CreateKnowledgeBaseReq
|
||||
expect(form.name).toBeTruthy()
|
||||
expect(form.external_knowledge_api_id).toBeTruthy()
|
||||
expect(form.external_knowledge_id).toBeTruthy()
|
||||
expect(form.provider).toBe('external')
|
||||
expect(typeof form.external_retrieval_model.top_k).toBe('number')
|
||||
expect(typeof form.external_retrieval_model.score_threshold).toBe('number')
|
||||
expect(typeof form.external_retrieval_model.score_threshold_enabled).toBe('boolean')
|
||||
})
|
||||
|
||||
it('should support optional description', () => {
|
||||
const formWithDesc = createFormData({ description: 'Some description' })
|
||||
const formWithoutDesc = createFormData({ description: '' })
|
||||
|
||||
expect(formWithDesc.description).toBe('Some description')
|
||||
expect(formWithoutDesc.description).toBe('')
|
||||
})
|
||||
|
||||
it('should validate retrieval model bounds', () => {
|
||||
const form = createFormData({
|
||||
external_retrieval_model: {
|
||||
top_k: 0,
|
||||
score_threshold: 0,
|
||||
score_threshold_enabled: false,
|
||||
},
|
||||
})
|
||||
|
||||
expect(form.external_retrieval_model.top_k).toBe(0)
|
||||
expect(form.external_retrieval_model.score_threshold).toBe(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe('External API List Integration', () => {
|
||||
it('should validate API item structure', () => {
|
||||
const apiItem = {
|
||||
id: 'api-1',
|
||||
name: 'Production API',
|
||||
settings: {
|
||||
endpoint: 'https://api.example.com',
|
||||
api_key: 'key-123',
|
||||
},
|
||||
}
|
||||
|
||||
expect(apiItem).toHaveProperty('id')
|
||||
expect(apiItem).toHaveProperty('name')
|
||||
expect(apiItem).toHaveProperty('settings')
|
||||
expect(apiItem.settings).toHaveProperty('endpoint')
|
||||
expect(apiItem.settings).toHaveProperty('api_key')
|
||||
})
|
||||
|
||||
it('should link API selection to form data', () => {
|
||||
const selectedApi = { id: 'api-2', name: 'Staging API' }
|
||||
const form = createFormData({
|
||||
external_knowledge_api_id: selectedApi.id,
|
||||
})
|
||||
|
||||
expect(form.external_knowledge_api_id).toBe('api-2')
|
||||
})
|
||||
})
|
||||
})
|
||||
web/__tests__/datasets/hit-testing-flow.test.tsx (new file, 404 lines)
@@ -0,0 +1,404 @@
/**
 * Integration Test: Hit Testing Flow
 *
 * Tests the query submission → API response → callback chain flow
 * by rendering the actual QueryInput component and triggering user interactions.
 * Validates that the production onSubmit logic correctly constructs payloads
 * and invokes callbacks on success/failure.
 */

import type {
  HitTestingResponse,
  Query,
} from '@/models/datasets'
import type { RetrievalConfig } from '@/types/app'
import { fireEvent, render, screen, waitFor } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import QueryInput from '@/app/components/datasets/hit-testing/components/query-input'
import { RETRIEVE_METHOD } from '@/types/app'

// --- Mocks ---
|
||||
|
||||
vi.mock('@/context/dataset-detail', () => ({
|
||||
default: {},
|
||||
useDatasetDetailContext: vi.fn(() => ({ dataset: undefined })),
|
||||
useDatasetDetailContextWithSelector: vi.fn(() => false),
|
||||
}))
|
||||
|
||||
vi.mock('use-context-selector', () => ({
|
||||
useContext: vi.fn(() => ({})),
|
||||
useContextSelector: vi.fn(() => false),
|
||||
createContext: vi.fn(() => ({})),
|
||||
}))
|
||||
|
||||
vi.mock('@/app/components/datasets/common/image-uploader/image-uploader-in-retrieval-testing', () => ({
|
||||
default: ({ textArea, actionButton }: { textArea: React.ReactNode, actionButton: React.ReactNode }) => (
|
||||
<div data-testid="image-uploader-mock">
|
||||
{textArea}
|
||||
{actionButton}
|
||||
</div>
|
||||
),
|
||||
}))
|
||||
|
||||
// --- Factories ---
|
||||
|
||||
const createRetrievalConfig = (overrides = {}): RetrievalConfig => ({
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
reranking_enable: false,
|
||||
reranking_mode: undefined,
|
||||
reranking_model: {
|
||||
reranking_provider_name: '',
|
||||
reranking_model_name: '',
|
||||
},
|
||||
weights: undefined,
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
score_threshold: 0.5,
|
||||
...overrides,
|
||||
} as RetrievalConfig)
|
||||
|
||||
const createHitTestingResponse = (numResults: number): HitTestingResponse => ({
|
||||
query: {
|
||||
content: 'What is Dify?',
|
||||
tsne_position: { x: 0, y: 0 },
|
||||
},
|
||||
records: Array.from({ length: numResults }, (_, i) => ({
|
||||
segment: {
|
||||
id: `seg-${i}`,
|
||||
document: {
|
||||
id: `doc-${i}`,
|
||||
data_source_type: 'upload_file',
|
||||
name: `document-${i}.txt`,
|
||||
doc_type: null as unknown as import('@/models/datasets').DocType,
|
||||
},
|
||||
content: `Result content ${i}`,
|
||||
sign_content: `Result content ${i}`,
|
||||
position: i + 1,
|
||||
word_count: 100 + i * 50,
|
||||
tokens: 50 + i * 25,
|
||||
keywords: ['test', 'dify'],
|
||||
hit_count: i * 5,
|
||||
index_node_hash: `hash-${i}`,
|
||||
answer: '',
|
||||
},
|
||||
content: {
|
||||
id: `seg-${i}`,
|
||||
document: {
|
||||
id: `doc-${i}`,
|
||||
data_source_type: 'upload_file',
|
||||
name: `document-${i}.txt`,
|
||||
doc_type: null as unknown as import('@/models/datasets').DocType,
|
||||
},
|
||||
content: `Result content ${i}`,
|
||||
sign_content: `Result content ${i}`,
|
||||
position: i + 1,
|
||||
word_count: 100 + i * 50,
|
||||
tokens: 50 + i * 25,
|
||||
keywords: ['test', 'dify'],
|
||||
hit_count: i * 5,
|
||||
index_node_hash: `hash-${i}`,
|
||||
answer: '',
|
||||
},
|
||||
score: 0.95 - i * 0.1,
|
||||
tsne_position: { x: 0, y: 0 },
|
||||
child_chunks: null,
|
||||
files: [],
|
||||
})),
|
||||
})
|
||||
|
||||
const createTextQuery = (content: string): Query[] => [
|
||||
{ content, content_type: 'text_query', file_info: null },
|
||||
]
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
const findSubmitButton = () => {
|
||||
const buttons = screen.getAllByRole('button')
|
||||
const submitButton = buttons.find(btn => btn.classList.contains('w-[88px]'))
|
||||
expect(submitButton).toBeTruthy()
|
||||
return submitButton!
|
||||
}
|
||||
|
||||
// --- Tests ---
|
||||
|
||||
describe('Hit Testing Flow', () => {
|
||||
const mockHitTestingMutation = vi.fn()
|
||||
const mockExternalMutation = vi.fn()
|
||||
const mockSetHitResult = vi.fn()
|
||||
const mockSetExternalHitResult = vi.fn()
|
||||
const mockOnUpdateList = vi.fn()
|
||||
const mockSetQueries = vi.fn()
|
||||
const mockOnClickRetrievalMethod = vi.fn()
|
||||
const mockOnSubmit = vi.fn()
|
||||
|
||||
const createDefaultProps = (overrides: Record<string, unknown> = {}) => ({
|
||||
onUpdateList: mockOnUpdateList,
|
||||
setHitResult: mockSetHitResult,
|
||||
setExternalHitResult: mockSetExternalHitResult,
|
||||
loading: false,
|
||||
queries: [] as Query[],
|
||||
setQueries: mockSetQueries,
|
||||
isExternal: false,
|
||||
onClickRetrievalMethod: mockOnClickRetrievalMethod,
|
||||
retrievalConfig: createRetrievalConfig(),
|
||||
isEconomy: false,
|
||||
onSubmit: mockOnSubmit,
|
||||
hitTestingMutation: mockHitTestingMutation,
|
||||
externalKnowledgeBaseHitTestingMutation: mockExternalMutation,
|
||||
...overrides,
|
||||
})
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe('Query Submission → API Call', () => {
|
||||
it('should call hitTestingMutation with correct payload including retrieval model', async () => {
|
||||
const retrievalConfig = createRetrievalConfig({
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
})
|
||||
mockHitTestingMutation.mockResolvedValue(createHitTestingResponse(3))
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('How does RAG work?'),
|
||||
retrievalConfig,
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHitTestingMutation).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
query: 'How does RAG work?',
|
||||
attachment_ids: [],
|
||||
retrieval_model: expect.objectContaining({
|
||||
search_method: RETRIEVE_METHOD.semantic,
|
||||
top_k: 3,
|
||||
score_threshold_enabled: false,
|
||||
}),
|
||||
}),
|
||||
expect.objectContaining({
|
||||
onSuccess: expect.any(Function),
|
||||
}),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
it('should override search_method to keywordSearch when isEconomy is true', async () => {
|
||||
const retrievalConfig = createRetrievalConfig({ search_method: RETRIEVE_METHOD.semantic })
|
||||
mockHitTestingMutation.mockResolvedValue(createHitTestingResponse(1))
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('test query'),
|
||||
retrievalConfig,
|
||||
isEconomy: true,
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHitTestingMutation).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
retrieval_model: expect.objectContaining({
|
||||
search_method: RETRIEVE_METHOD.keywordSearch,
|
||||
}),
|
||||
}),
|
||||
expect.anything(),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle empty results by calling setHitResult with empty records', async () => {
|
||||
const emptyResponse = createHitTestingResponse(0)
|
||||
mockHitTestingMutation.mockImplementation(async (_params: unknown, options?: { onSuccess?: (data: HitTestingResponse) => void }) => {
|
||||
options?.onSuccess?.(emptyResponse)
|
||||
return emptyResponse
|
||||
})
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('nonexistent topic'),
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetHitResult).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ records: [] }),
|
||||
)
|
||||
})
|
||||
})
|
||||
|
||||
it('should not call success callbacks when mutation resolves without onSuccess', async () => {
|
||||
// Simulate a mutation that resolves but does not invoke the onSuccess callback
|
||||
mockHitTestingMutation.mockResolvedValue(undefined)
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('test'),
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockHitTestingMutation).toHaveBeenCalled()
|
||||
})
|
||||
// Success callbacks should not fire when onSuccess is not invoked
|
||||
expect(mockSetHitResult).not.toHaveBeenCalled()
|
||||
expect(mockOnUpdateList).not.toHaveBeenCalled()
|
||||
expect(mockOnSubmit).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
describe('API Response → Results Data Contract', () => {
|
||||
it('should produce results with required segment fields for rendering', () => {
|
||||
const response = createHitTestingResponse(3)
|
||||
|
||||
// Validate each result has the fields needed by ResultItem component
|
||||
response.records.forEach((record) => {
|
||||
expect(record.segment).toHaveProperty('id')
|
||||
expect(record.segment).toHaveProperty('content')
|
||||
expect(record.segment).toHaveProperty('position')
|
||||
expect(record.segment).toHaveProperty('word_count')
|
||||
expect(record.segment).toHaveProperty('document')
|
||||
expect(record.segment.document).toHaveProperty('name')
|
||||
expect(record.score).toBeGreaterThanOrEqual(0)
|
||||
expect(record.score).toBeLessThanOrEqual(1)
|
||||
})
|
||||
})
|
||||
|
||||
it('should maintain correct score ordering', () => {
|
||||
const response = createHitTestingResponse(5)
|
||||
|
||||
for (let i = 1; i < response.records.length; i++) {
|
||||
expect(response.records[i - 1].score).toBeGreaterThanOrEqual(response.records[i].score)
|
||||
}
|
||||
})
|
||||
|
||||
it('should include document metadata for result item display', () => {
|
||||
const response = createHitTestingResponse(1)
|
||||
const record = response.records[0]
|
||||
|
||||
expect(record.segment.document.name).toBeTruthy()
|
||||
expect(record.segment.document.data_source_type).toBeTruthy()
|
||||
})
|
||||
})
|
||||
|
||||
describe('Successful Submission → Callback Chain', () => {
|
||||
it('should call setHitResult, onUpdateList, and onSubmit after successful submission', async () => {
|
||||
const response = createHitTestingResponse(3)
|
||||
mockHitTestingMutation.mockImplementation(async (_params: unknown, options?: { onSuccess?: (data: HitTestingResponse) => void }) => {
|
||||
options?.onSuccess?.(response)
|
||||
return response
|
||||
})
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('Test query'),
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetHitResult).toHaveBeenCalledWith(response)
|
||||
expect(mockOnUpdateList).toHaveBeenCalledTimes(1)
|
||||
expect(mockOnSubmit).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
})
|
||||
|
||||
it('should trigger records list refresh via onUpdateList after query', async () => {
|
||||
const response = createHitTestingResponse(1)
|
||||
mockHitTestingMutation.mockImplementation(async (_params: unknown, options?: { onSuccess?: (data: HitTestingResponse) => void }) => {
|
||||
options?.onSuccess?.(response)
|
||||
return response
|
||||
})
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('new query'),
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockOnUpdateList).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
describe('External KB Hit Testing', () => {
|
||||
it('should use external mutation with correct payload for external datasets', async () => {
|
||||
mockExternalMutation.mockImplementation(async (_params: unknown, options?: { onSuccess?: (data: { records: never[] }) => void }) => {
|
||||
const response = { records: [] }
|
||||
options?.onSuccess?.(response)
|
||||
return response
|
||||
})
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('test'),
|
||||
isExternal: true,
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockExternalMutation).toHaveBeenCalledWith(
|
||||
expect.objectContaining({
|
||||
query: 'test',
|
||||
external_retrieval_model: expect.objectContaining({
|
||||
top_k: 4,
|
||||
score_threshold: 0.5,
|
||||
score_threshold_enabled: false,
|
||||
}),
|
||||
}),
|
||||
expect.objectContaining({
|
||||
onSuccess: expect.any(Function),
|
||||
}),
|
||||
)
|
||||
// Internal mutation should NOT be called
|
||||
expect(mockHitTestingMutation).not.toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
|
||||
it('should call setExternalHitResult and onUpdateList on successful external submission', async () => {
|
||||
const externalResponse = { records: [] }
|
||||
mockExternalMutation.mockImplementation(async (_params: unknown, options?: { onSuccess?: (data: { records: never[] }) => void }) => {
|
||||
options?.onSuccess?.(externalResponse)
|
||||
return externalResponse
|
||||
})
|
||||
|
||||
render(
|
||||
<QueryInput {...createDefaultProps({
|
||||
queries: createTextQuery('external query'),
|
||||
isExternal: true,
|
||||
})}
|
||||
/>,
|
||||
)
|
||||
|
||||
fireEvent.click(findSubmitButton())
|
||||
|
||||
await waitFor(() => {
|
||||
expect(mockSetExternalHitResult).toHaveBeenCalledWith(externalResponse)
|
||||
expect(mockOnUpdateList).toHaveBeenCalledTimes(1)
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
337	web/__tests__/datasets/metadata-management-flow.test.tsx	Normal file
@@ -0,0 +1,337 @@
|
||||
/**
|
||||
* Integration Test: Metadata Management Flow
|
||||
*
|
||||
* Tests the cross-module composition of metadata name validation, type constraints,
|
||||
* and duplicate detection across the metadata management hooks.
|
||||
*
|
||||
* The unit-level use-check-metadata-name.spec.ts tests the validation hook alone.
|
||||
* This integration test verifies:
|
||||
* - Name validation combined with existing metadata list (duplicate detection)
|
||||
* - Metadata type enum constraints matching expected data model
|
||||
* - Full add/rename workflow: validate name → check duplicates → allow or reject
|
||||
* - Name uniqueness logic: existing metadata keeps its own name, cannot take another's
|
||||
*/
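// A sketch of the naming rule the format tests below imply (an assumption, not the
// hook's actual implementation): lowercase letters, digits and underscores, starting
// with a letter, at most 255 characters.
const assumedNamePattern = /^[a-z][a-z0-9_]*$/
const isAssumedValidName = (name: string) =>
  name.length > 0 && name.length <= 255 && assumedNamePattern.test(name)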
|
||||
|
||||
import type { MetadataItemWithValueLength } from '@/app/components/datasets/metadata/types'
|
||||
import { renderHook } from '@testing-library/react'
|
||||
import { DataType } from '@/app/components/datasets/metadata/types'
|
||||
|
||||
vi.mock('react-i18next', () => ({
|
||||
useTranslation: () => ({
|
||||
t: (key: string) => key,
|
||||
}),
|
||||
}))
|
||||
|
||||
const { default: useCheckMetadataName } = await import(
|
||||
'@/app/components/datasets/metadata/hooks/use-check-metadata-name',
|
||||
)
|
||||
|
||||
// --- Factory functions ---
|
||||
|
||||
const createMetadataItem = (
|
||||
id: string,
|
||||
name: string,
|
||||
type = DataType.string,
|
||||
count = 0,
|
||||
): MetadataItemWithValueLength => ({
|
||||
id,
|
||||
name,
|
||||
type,
|
||||
count,
|
||||
})
|
||||
|
||||
const createMetadataList = (): MetadataItemWithValueLength[] => [
|
||||
createMetadataItem('meta-1', 'author', DataType.string, 5),
|
||||
createMetadataItem('meta-2', 'created_date', DataType.time, 10),
|
||||
createMetadataItem('meta-3', 'page_count', DataType.number, 3),
|
||||
createMetadataItem('meta-4', 'source_url', DataType.string, 8),
|
||||
createMetadataItem('meta-5', 'version', DataType.number, 2),
|
||||
]
|
||||
|
||||
describe('Metadata Management Flow - Cross-Module Validation Composition', () => {
|
||||
describe('Name Validation Flow: Format Rules', () => {
|
||||
it('should accept valid lowercase names with underscores', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
|
||||
expect(result.current.checkName('valid_name').errorMsg).toBe('')
|
||||
expect(result.current.checkName('author').errorMsg).toBe('')
|
||||
expect(result.current.checkName('page_count').errorMsg).toBe('')
|
||||
expect(result.current.checkName('v2_field').errorMsg).toBe('')
|
||||
})
|
||||
|
||||
it('should reject empty names', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
|
||||
expect(result.current.checkName('').errorMsg).toBeTruthy()
|
||||
})
|
||||
|
||||
it('should reject names with invalid characters', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
|
||||
expect(result.current.checkName('Author').errorMsg).toBeTruthy()
|
||||
expect(result.current.checkName('my-field').errorMsg).toBeTruthy()
|
||||
expect(result.current.checkName('field name').errorMsg).toBeTruthy()
|
||||
expect(result.current.checkName('1field').errorMsg).toBeTruthy()
|
||||
expect(result.current.checkName('_private').errorMsg).toBeTruthy()
|
||||
})
|
||||
|
||||
it('should reject names exceeding 255 characters', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
|
||||
const longName = 'a'.repeat(256)
|
||||
expect(result.current.checkName(longName).errorMsg).toBeTruthy()
|
||||
|
||||
const maxName = 'a'.repeat(255)
|
||||
expect(result.current.checkName(maxName).errorMsg).toBe('')
|
||||
})
|
||||
})
|
||||
|
||||
describe('Metadata Type Constraints: Enum Values Match Expected Set', () => {
|
||||
it('should define exactly three data types', () => {
|
||||
const typeValues = Object.values(DataType)
|
||||
expect(typeValues).toHaveLength(3)
|
||||
})
|
||||
|
||||
it('should include string, number, and time types', () => {
|
||||
expect(DataType.string).toBe('string')
|
||||
expect(DataType.number).toBe('number')
|
||||
expect(DataType.time).toBe('time')
|
||||
})
|
||||
|
||||
it('should use consistent types in metadata items', () => {
|
||||
const metadataList = createMetadataList()
|
||||
|
||||
const stringItems = metadataList.filter(m => m.type === DataType.string)
|
||||
const numberItems = metadataList.filter(m => m.type === DataType.number)
|
||||
const timeItems = metadataList.filter(m => m.type === DataType.time)
|
||||
|
||||
expect(stringItems).toHaveLength(2)
|
||||
expect(numberItems).toHaveLength(2)
|
||||
expect(timeItems).toHaveLength(1)
|
||||
})
|
||||
|
||||
it('should enforce type-safe metadata item construction', () => {
|
||||
const item = createMetadataItem('test-1', 'test_field', DataType.number, 0)
|
||||
|
||||
expect(item.id).toBe('test-1')
|
||||
expect(item.name).toBe('test_field')
|
||||
expect(item.type).toBe(DataType.number)
|
||||
expect(item.count).toBe(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Duplicate Name Detection: Add Metadata → Check Name → Detect Duplicates', () => {
|
||||
it('should detect duplicate names against an existing metadata list', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const checkDuplicate = (newName: string): boolean => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return false
|
||||
return existingMetadata.some(m => m.name === newName)
|
||||
}
|
||||
|
||||
expect(checkDuplicate('author')).toBe(true)
|
||||
expect(checkDuplicate('created_date')).toBe(true)
|
||||
expect(checkDuplicate('page_count')).toBe(true)
|
||||
})
|
||||
|
||||
it('should allow names that do not conflict with existing metadata', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const isNameAvailable = (newName: string): boolean => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return false
|
||||
return !existingMetadata.some(m => m.name === newName)
|
||||
}
|
||||
|
||||
expect(isNameAvailable('category')).toBe(true)
|
||||
expect(isNameAvailable('file_size')).toBe(true)
|
||||
expect(isNameAvailable('language')).toBe(true)
|
||||
})
|
||||
|
||||
it('should reject names that fail format validation before duplicate check', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
|
||||
const validateAndCheckDuplicate = (newName: string): { valid: boolean, reason: string } => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return { valid: false, reason: 'format' }
|
||||
return { valid: true, reason: '' }
|
||||
}
|
||||
|
||||
expect(validateAndCheckDuplicate('Author').reason).toBe('format')
|
||||
expect(validateAndCheckDuplicate('').reason).toBe('format')
|
||||
expect(validateAndCheckDuplicate('valid_name').valid).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Name Uniqueness Across Edits: Rename Workflow', () => {
|
||||
it('should allow an existing metadata item to keep its own name', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const isRenameValid = (itemId: string, newName: string): boolean => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return false
|
||||
// Allow keeping the same name (skip self in duplicate check)
|
||||
return !existingMetadata.some(m => m.name === newName && m.id !== itemId)
|
||||
}
|
||||
|
||||
// Author keeping its own name should be valid
|
||||
expect(isRenameValid('meta-1', 'author')).toBe(true)
|
||||
// page_count keeping its own name should be valid
|
||||
expect(isRenameValid('meta-3', 'page_count')).toBe(true)
|
||||
})
|
||||
|
||||
it('should reject renaming to another existing metadata name', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const isRenameValid = (itemId: string, newName: string): boolean => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return false
|
||||
return !existingMetadata.some(m => m.name === newName && m.id !== itemId)
|
||||
}
|
||||
|
||||
// Author trying to rename to "page_count" (taken by meta-3)
|
||||
expect(isRenameValid('meta-1', 'page_count')).toBe(false)
|
||||
// version trying to rename to "source_url" (taken by meta-4)
|
||||
expect(isRenameValid('meta-5', 'source_url')).toBe(false)
|
||||
})
|
||||
|
||||
it('should allow renaming to a completely new valid name', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const isRenameValid = (itemId: string, newName: string): boolean => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return false
|
||||
return !existingMetadata.some(m => m.name === newName && m.id !== itemId)
|
||||
}
|
||||
|
||||
expect(isRenameValid('meta-1', 'document_author')).toBe(true)
|
||||
expect(isRenameValid('meta-2', 'publish_date')).toBe(true)
|
||||
expect(isRenameValid('meta-3', 'total_pages')).toBe(true)
|
||||
})
|
||||
|
||||
it('should reject renaming with an invalid format even if name is unique', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const isRenameValid = (itemId: string, newName: string): boolean => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return false
|
||||
return !existingMetadata.some(m => m.name === newName && m.id !== itemId)
|
||||
}
|
||||
|
||||
expect(isRenameValid('meta-1', 'New Author')).toBe(false)
|
||||
expect(isRenameValid('meta-2', '2024_date')).toBe(false)
|
||||
expect(isRenameValid('meta-3', '')).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Full Metadata Management Workflow', () => {
|
||||
it('should support a complete add-validate-check-duplicate cycle', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const addMetadataField = (
|
||||
name: string,
|
||||
type: DataType,
|
||||
): { success: boolean, error?: string } => {
|
||||
const formatCheck = result.current.checkName(name)
|
||||
if (formatCheck.errorMsg)
|
||||
return { success: false, error: 'invalid_format' }
|
||||
|
||||
if (existingMetadata.some(m => m.name === name))
|
||||
return { success: false, error: 'duplicate_name' }
|
||||
|
||||
existingMetadata.push(createMetadataItem(`meta-${existingMetadata.length + 1}`, name, type))
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
// Add a valid new field
|
||||
const result1 = addMetadataField('department', DataType.string)
|
||||
expect(result1.success).toBe(true)
|
||||
expect(existingMetadata).toHaveLength(6)
|
||||
|
||||
// Try to add a duplicate
|
||||
const result2 = addMetadataField('author', DataType.string)
|
||||
expect(result2.success).toBe(false)
|
||||
expect(result2.error).toBe('duplicate_name')
|
||||
expect(existingMetadata).toHaveLength(6)
|
||||
|
||||
// Try to add an invalid name
|
||||
const result3 = addMetadataField('Invalid Name', DataType.string)
|
||||
expect(result3.success).toBe(false)
|
||||
expect(result3.error).toBe('invalid_format')
|
||||
expect(existingMetadata).toHaveLength(6)
|
||||
|
||||
// Add another valid field
|
||||
const result4 = addMetadataField('priority_level', DataType.number)
|
||||
expect(result4.success).toBe(true)
|
||||
expect(existingMetadata).toHaveLength(7)
|
||||
})
|
||||
|
||||
it('should support a complete rename workflow with validation chain', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
const existingMetadata = createMetadataList()
|
||||
|
||||
const renameMetadataField = (
|
||||
itemId: string,
|
||||
newName: string,
|
||||
): { success: boolean, error?: string } => {
|
||||
const formatCheck = result.current.checkName(newName)
|
||||
if (formatCheck.errorMsg)
|
||||
return { success: false, error: 'invalid_format' }
|
||||
|
||||
if (existingMetadata.some(m => m.name === newName && m.id !== itemId))
|
||||
return { success: false, error: 'duplicate_name' }
|
||||
|
||||
const item = existingMetadata.find(m => m.id === itemId)
|
||||
if (!item)
|
||||
return { success: false, error: 'not_found' }
|
||||
|
||||
// Simulate the rename in-place
|
||||
const index = existingMetadata.indexOf(item)
|
||||
existingMetadata[index] = { ...item, name: newName }
|
||||
return { success: true }
|
||||
}
|
||||
|
||||
// Rename author to document_author
|
||||
expect(renameMetadataField('meta-1', 'document_author').success).toBe(true)
|
||||
expect(existingMetadata.find(m => m.id === 'meta-1')?.name).toBe('document_author')
|
||||
|
||||
// Try renaming created_date to page_count (already taken)
|
||||
expect(renameMetadataField('meta-2', 'page_count').error).toBe('duplicate_name')
|
||||
|
||||
// Rename to invalid format
|
||||
expect(renameMetadataField('meta-3', 'Page Count').error).toBe('invalid_format')
|
||||
|
||||
// Rename non-existent item
|
||||
expect(renameMetadataField('meta-999', 'something').error).toBe('not_found')
|
||||
})
|
||||
|
||||
it('should maintain validation consistency across multiple operations', () => {
|
||||
const { result } = renderHook(() => useCheckMetadataName())
|
||||
|
||||
// Validate the same name multiple times for consistency
|
||||
const name = 'consistent_field'
|
||||
const results = Array.from({ length: 5 }, () => result.current.checkName(name))
|
||||
|
||||
expect(results.every(r => r.errorMsg === '')).toBe(true)
|
||||
|
||||
// Validate an invalid name multiple times
|
||||
const invalidResults = Array.from({ length: 5 }, () => result.current.checkName('Invalid'))
|
||||
expect(invalidResults.every(r => r.errorMsg !== '')).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
477	web/__tests__/datasets/pipeline-datasource-flow.test.tsx	Normal file
@@ -0,0 +1,477 @@
|
||||
/**
|
||||
* Integration Test: Pipeline Data Source Store Composition
|
||||
*
|
||||
* Tests cross-slice interactions in the pipeline data source Zustand store.
|
||||
* The unit-level slice specs test each slice in isolation.
|
||||
* This integration test verifies:
|
||||
* - Store initialization produces correct defaults across all slices
|
||||
* - Cross-slice coordination (e.g. credential shared across slices)
|
||||
* - State isolation: changes in one slice do not affect others
|
||||
* - Full workflow simulation through credential → source → data path
|
||||
*/
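// A minimal sketch (an assumption, not the actual store module) of how a combined
// data-source store could be built from per-source slices with zustand's vanilla
// createStore; the real createDataSourceStore is imported below.
import { createStore } from 'zustand/vanilla'

type ExampleCommonSlice = {
  currentCredentialId: string
  setCurrentCredentialId: (id: string) => void
}
type ExampleLocalFileSlice = {
  localFileList: unknown[]
  setLocalFileList: (files: unknown[]) => void
}

const createExampleDataSourceStore = () =>
  createStore<ExampleCommonSlice & ExampleLocalFileSlice>()(set => ({
    currentCredentialId: '',
    setCurrentCredentialId: id => set({ currentCredentialId: id }),
    localFileList: [],
    setLocalFileList: files => set({ localFileList: files }),
  }))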
|
||||
|
||||
import type { NotionPage } from '@/models/common'
|
||||
import type { CrawlResultItem, FileItem } from '@/models/datasets'
|
||||
import type { OnlineDriveFile } from '@/models/pipeline'
|
||||
import { createDataSourceStore } from '@/app/components/datasets/documents/create-from-pipeline/data-source/store'
|
||||
import { CrawlStep } from '@/models/datasets'
|
||||
import { OnlineDriveFileType } from '@/models/pipeline'
|
||||
|
||||
// --- Factory functions ---
|
||||
|
||||
const createFileItem = (id: string): FileItem => ({
|
||||
fileID: id,
|
||||
file: { id, name: `${id}.txt`, size: 1024 } as FileItem['file'],
|
||||
progress: 100,
|
||||
})
|
||||
|
||||
const createCrawlResultItem = (url: string, title?: string): CrawlResultItem => ({
|
||||
title: title ?? `Page: ${url}`,
|
||||
markdown: `# ${title ?? url}\n\nContent for ${url}`,
|
||||
description: `Description for ${url}`,
|
||||
source_url: url,
|
||||
})
|
||||
|
||||
const createOnlineDriveFile = (id: string, name: string, type = OnlineDriveFileType.file): OnlineDriveFile => ({
|
||||
id,
|
||||
name,
|
||||
size: 2048,
|
||||
type,
|
||||
})
|
||||
|
||||
const createNotionPage = (pageId: string): NotionPage => ({
|
||||
page_id: pageId,
|
||||
page_name: `Page ${pageId}`,
|
||||
page_icon: null,
|
||||
is_bound: true,
|
||||
parent_id: 'parent-1',
|
||||
type: 'page',
|
||||
workspace_id: 'ws-1',
|
||||
})
|
||||
|
||||
describe('Pipeline Data Source Store Composition - Cross-Slice Integration', () => {
|
||||
describe('Store Initialization → All Slices Have Correct Defaults', () => {
|
||||
it('should create a store with all five slices combined', () => {
|
||||
const store = createDataSourceStore()
|
||||
const state = store.getState()
|
||||
|
||||
// Common slice defaults
|
||||
expect(state.currentCredentialId).toBe('')
|
||||
expect(state.currentNodeIdRef.current).toBe('')
|
||||
|
||||
// Local file slice defaults
|
||||
expect(state.localFileList).toEqual([])
|
||||
expect(state.currentLocalFile).toBeUndefined()
|
||||
|
||||
// Online document slice defaults
|
||||
expect(state.documentsData).toEqual([])
|
||||
expect(state.onlineDocuments).toEqual([])
|
||||
expect(state.searchValue).toBe('')
|
||||
expect(state.selectedPagesId).toEqual(new Set())
|
||||
|
||||
// Website crawl slice defaults
|
||||
expect(state.websitePages).toEqual([])
|
||||
expect(state.step).toBe(CrawlStep.init)
|
||||
expect(state.previewIndex).toBe(-1)
|
||||
|
||||
// Online drive slice defaults
|
||||
expect(state.breadcrumbs).toEqual([])
|
||||
expect(state.prefix).toEqual([])
|
||||
expect(state.keywords).toBe('')
|
||||
expect(state.selectedFileIds).toEqual([])
|
||||
expect(state.onlineDriveFileList).toEqual([])
|
||||
expect(state.bucket).toBe('')
|
||||
expect(state.hasBucket).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Cross-Slice Coordination: Shared Credential', () => {
|
||||
it('should set credential that is accessible from the common slice', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setCurrentCredentialId('cred-abc-123')
|
||||
|
||||
expect(store.getState().currentCredentialId).toBe('cred-abc-123')
|
||||
})
|
||||
|
||||
it('should allow credential update independently of all other slices', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setLocalFileList([createFileItem('f1')])
|
||||
store.getState().setCurrentCredentialId('cred-xyz')
|
||||
|
||||
expect(store.getState().currentCredentialId).toBe('cred-xyz')
|
||||
expect(store.getState().localFileList).toHaveLength(1)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Local File Workflow: Set Files → Verify List → Clear', () => {
|
||||
it('should set and retrieve local file list', () => {
|
||||
const store = createDataSourceStore()
|
||||
const files = [createFileItem('f1'), createFileItem('f2'), createFileItem('f3')]
|
||||
|
||||
store.getState().setLocalFileList(files)
|
||||
|
||||
expect(store.getState().localFileList).toHaveLength(3)
|
||||
expect(store.getState().localFileList[0].fileID).toBe('f1')
|
||||
expect(store.getState().localFileList[2].fileID).toBe('f3')
|
||||
})
|
||||
|
||||
it('should update preview ref when setting file list', () => {
|
||||
const store = createDataSourceStore()
|
||||
const files = [createFileItem('f-preview')]
|
||||
|
||||
store.getState().setLocalFileList(files)
|
||||
|
||||
expect(store.getState().previewLocalFileRef.current).toBeDefined()
|
||||
})
|
||||
|
||||
it('should clear files by setting empty list', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setLocalFileList([createFileItem('f1')])
|
||||
expect(store.getState().localFileList).toHaveLength(1)
|
||||
|
||||
store.getState().setLocalFileList([])
|
||||
expect(store.getState().localFileList).toHaveLength(0)
|
||||
})
|
||||
|
||||
it('should set and clear current local file selection', () => {
|
||||
const store = createDataSourceStore()
|
||||
const file = { id: 'current-file', name: 'current.txt' } as FileItem['file']
|
||||
|
||||
store.getState().setCurrentLocalFile(file)
|
||||
expect(store.getState().currentLocalFile).toBeDefined()
|
||||
expect(store.getState().currentLocalFile?.id).toBe('current-file')
|
||||
|
||||
store.getState().setCurrentLocalFile(undefined)
|
||||
expect(store.getState().currentLocalFile).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('Online Document Workflow: Set Documents → Select Pages → Verify', () => {
|
||||
it('should set documents data and online documents', () => {
|
||||
const store = createDataSourceStore()
|
||||
const pages = [createNotionPage('page-1'), createNotionPage('page-2')]
|
||||
|
||||
store.getState().setOnlineDocuments(pages)
|
||||
|
||||
expect(store.getState().onlineDocuments).toHaveLength(2)
|
||||
expect(store.getState().onlineDocuments[0].page_id).toBe('page-1')
|
||||
})
|
||||
|
||||
it('should update preview ref when setting online documents', () => {
|
||||
const store = createDataSourceStore()
|
||||
const pages = [createNotionPage('page-preview')]
|
||||
|
||||
store.getState().setOnlineDocuments(pages)
|
||||
|
||||
expect(store.getState().previewOnlineDocumentRef.current).toBeDefined()
|
||||
expect(store.getState().previewOnlineDocumentRef.current?.page_id).toBe('page-preview')
|
||||
})
|
||||
|
||||
it('should track selected page IDs', () => {
|
||||
const store = createDataSourceStore()
|
||||
const pages = [createNotionPage('p1'), createNotionPage('p2'), createNotionPage('p3')]
|
||||
|
||||
store.getState().setOnlineDocuments(pages)
|
||||
store.getState().setSelectedPagesId(new Set(['p1', 'p3']))
|
||||
|
||||
expect(store.getState().selectedPagesId.size).toBe(2)
|
||||
expect(store.getState().selectedPagesId.has('p1')).toBe(true)
|
||||
expect(store.getState().selectedPagesId.has('p2')).toBe(false)
|
||||
expect(store.getState().selectedPagesId.has('p3')).toBe(true)
|
||||
})
|
||||
|
||||
it('should manage search value for filtering documents', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setSearchValue('meeting notes')
|
||||
|
||||
expect(store.getState().searchValue).toBe('meeting notes')
|
||||
})
|
||||
|
||||
it('should set and clear current document selection', () => {
|
||||
const store = createDataSourceStore()
|
||||
const page = createNotionPage('current-page')
|
||||
|
||||
store.getState().setCurrentDocument(page)
|
||||
expect(store.getState().currentDocument?.page_id).toBe('current-page')
|
||||
|
||||
store.getState().setCurrentDocument(undefined)
|
||||
expect(store.getState().currentDocument).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('Website Crawl Workflow: Set Pages → Track Step → Preview', () => {
|
||||
it('should set website pages and update preview ref', () => {
|
||||
const store = createDataSourceStore()
|
||||
const pages = [
|
||||
createCrawlResultItem('https://example.com'),
|
||||
createCrawlResultItem('https://example.com/about'),
|
||||
]
|
||||
|
||||
store.getState().setWebsitePages(pages)
|
||||
|
||||
expect(store.getState().websitePages).toHaveLength(2)
|
||||
expect(store.getState().previewWebsitePageRef.current?.source_url).toBe('https://example.com')
|
||||
})
|
||||
|
||||
it('should manage crawl step transitions', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
expect(store.getState().step).toBe(CrawlStep.init)
|
||||
|
||||
store.getState().setStep(CrawlStep.running)
|
||||
expect(store.getState().step).toBe(CrawlStep.running)
|
||||
|
||||
store.getState().setStep(CrawlStep.finished)
|
||||
expect(store.getState().step).toBe(CrawlStep.finished)
|
||||
})
|
||||
|
||||
it('should set crawl result with data and timing', () => {
|
||||
const store = createDataSourceStore()
|
||||
const result = {
|
||||
data: [createCrawlResultItem('https://test.com')],
|
||||
time_consuming: 3.5,
|
||||
}
|
||||
|
||||
store.getState().setCrawlResult(result)
|
||||
|
||||
expect(store.getState().crawlResult?.data).toHaveLength(1)
|
||||
expect(store.getState().crawlResult?.time_consuming).toBe(3.5)
|
||||
})
|
||||
|
||||
it('should manage preview index for page navigation', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setPreviewIndex(2)
|
||||
expect(store.getState().previewIndex).toBe(2)
|
||||
|
||||
store.getState().setPreviewIndex(-1)
|
||||
expect(store.getState().previewIndex).toBe(-1)
|
||||
})
|
||||
|
||||
it('should set and clear current website selection', () => {
|
||||
const store = createDataSourceStore()
|
||||
const page = createCrawlResultItem('https://current.com')
|
||||
|
||||
store.getState().setCurrentWebsite(page)
|
||||
expect(store.getState().currentWebsite?.source_url).toBe('https://current.com')
|
||||
|
||||
store.getState().setCurrentWebsite(undefined)
|
||||
expect(store.getState().currentWebsite).toBeUndefined()
|
||||
})
|
||||
})
|
||||
|
||||
describe('Online Drive Workflow: Breadcrumbs → File Selection → Navigation', () => {
|
||||
it('should manage breadcrumb navigation', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setBreadcrumbs(['root', 'folder-a', 'subfolder'])
|
||||
|
||||
expect(store.getState().breadcrumbs).toEqual(['root', 'folder-a', 'subfolder'])
|
||||
})
|
||||
|
||||
it('should support breadcrumb push/pop pattern', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setBreadcrumbs(['root'])
|
||||
store.getState().setBreadcrumbs([...store.getState().breadcrumbs, 'level-1'])
|
||||
store.getState().setBreadcrumbs([...store.getState().breadcrumbs, 'level-2'])
|
||||
|
||||
expect(store.getState().breadcrumbs).toEqual(['root', 'level-1', 'level-2'])
|
||||
|
||||
// Pop back one level
|
||||
store.getState().setBreadcrumbs(store.getState().breadcrumbs.slice(0, -1))
|
||||
expect(store.getState().breadcrumbs).toEqual(['root', 'level-1'])
|
||||
})
|
||||
|
||||
it('should manage file list and selection', () => {
|
||||
const store = createDataSourceStore()
|
||||
const files = [
|
||||
createOnlineDriveFile('drive-1', 'report.pdf'),
|
||||
createOnlineDriveFile('drive-2', 'data.csv'),
|
||||
createOnlineDriveFile('drive-3', 'images', OnlineDriveFileType.folder),
|
||||
]
|
||||
|
||||
store.getState().setOnlineDriveFileList(files)
|
||||
expect(store.getState().onlineDriveFileList).toHaveLength(3)
|
||||
|
||||
store.getState().setSelectedFileIds(['drive-1', 'drive-2'])
|
||||
expect(store.getState().selectedFileIds).toEqual(['drive-1', 'drive-2'])
|
||||
})
|
||||
|
||||
it('should update preview ref when selecting files', () => {
|
||||
const store = createDataSourceStore()
|
||||
const files = [
|
||||
createOnlineDriveFile('drive-a', 'file-a.txt'),
|
||||
createOnlineDriveFile('drive-b', 'file-b.txt'),
|
||||
]
|
||||
|
||||
store.getState().setOnlineDriveFileList(files)
|
||||
store.getState().setSelectedFileIds(['drive-b'])
|
||||
|
||||
expect(store.getState().previewOnlineDriveFileRef.current?.id).toBe('drive-b')
|
||||
})
|
||||
|
||||
it('should manage bucket and prefix for S3-like navigation', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setBucket('my-data-bucket')
|
||||
store.getState().setPrefix(['data', '2024'])
|
||||
store.getState().setHasBucket(true)
|
||||
|
||||
expect(store.getState().bucket).toBe('my-data-bucket')
|
||||
expect(store.getState().prefix).toEqual(['data', '2024'])
|
||||
expect(store.getState().hasBucket).toBe(true)
|
||||
})
|
||||
|
||||
it('should manage keywords for search filtering', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setKeywords('quarterly report')
|
||||
expect(store.getState().keywords).toBe('quarterly report')
|
||||
})
|
||||
})
|
||||
|
||||
describe('State Isolation: Changes to One Slice Do Not Affect Others', () => {
|
||||
it('should keep local file state independent from online document state', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setLocalFileList([createFileItem('local-1')])
|
||||
store.getState().setOnlineDocuments([createNotionPage('notion-1')])
|
||||
|
||||
expect(store.getState().localFileList).toHaveLength(1)
|
||||
expect(store.getState().onlineDocuments).toHaveLength(1)
|
||||
|
||||
// Clearing local files should not affect online documents
|
||||
store.getState().setLocalFileList([])
|
||||
expect(store.getState().localFileList).toHaveLength(0)
|
||||
expect(store.getState().onlineDocuments).toHaveLength(1)
|
||||
})
|
||||
|
||||
it('should keep website crawl state independent from online drive state', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
store.getState().setWebsitePages([createCrawlResultItem('https://site.com')])
|
||||
store.getState().setOnlineDriveFileList([createOnlineDriveFile('d1', 'file.txt')])
|
||||
|
||||
expect(store.getState().websitePages).toHaveLength(1)
|
||||
expect(store.getState().onlineDriveFileList).toHaveLength(1)
|
||||
|
||||
// Clearing website pages should not affect drive files
|
||||
store.getState().setWebsitePages([])
|
||||
expect(store.getState().websitePages).toHaveLength(0)
|
||||
expect(store.getState().onlineDriveFileList).toHaveLength(1)
|
||||
})
|
||||
|
||||
it('should create fully independent store instances', () => {
|
||||
const storeA = createDataSourceStore()
|
||||
const storeB = createDataSourceStore()
|
||||
|
||||
storeA.getState().setCurrentCredentialId('cred-A')
|
||||
storeA.getState().setLocalFileList([createFileItem('fa-1')])
|
||||
|
||||
expect(storeA.getState().currentCredentialId).toBe('cred-A')
|
||||
expect(storeB.getState().currentCredentialId).toBe('')
|
||||
expect(storeB.getState().localFileList).toEqual([])
|
||||
})
|
||||
})
|
||||
|
||||
describe('Full Workflow Simulation: Credential → Source → Data → Verify', () => {
|
||||
it('should support a complete local file upload workflow', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
// Step 1: Set credential
|
||||
store.getState().setCurrentCredentialId('upload-cred-1')
|
||||
|
||||
// Step 2: Set file list
|
||||
const files = [createFileItem('upload-1'), createFileItem('upload-2')]
|
||||
store.getState().setLocalFileList(files)
|
||||
|
||||
// Step 3: Select current file for preview
|
||||
store.getState().setCurrentLocalFile(files[0].file)
|
||||
|
||||
// Verify all state is consistent
|
||||
expect(store.getState().currentCredentialId).toBe('upload-cred-1')
|
||||
expect(store.getState().localFileList).toHaveLength(2)
|
||||
expect(store.getState().currentLocalFile?.id).toBe('upload-1')
|
||||
expect(store.getState().previewLocalFileRef.current).toBeDefined()
|
||||
})
|
||||
|
||||
it('should support a complete website crawl workflow', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
// Step 1: Set credential
|
||||
store.getState().setCurrentCredentialId('crawl-cred-1')
|
||||
|
||||
// Step 2: Init crawl
|
||||
store.getState().setStep(CrawlStep.running)
|
||||
|
||||
// Step 3: Crawl completes with results
|
||||
const crawledPages = [
|
||||
createCrawlResultItem('https://docs.example.com/guide'),
|
||||
createCrawlResultItem('https://docs.example.com/api'),
|
||||
createCrawlResultItem('https://docs.example.com/faq'),
|
||||
]
|
||||
store.getState().setCrawlResult({ data: crawledPages, time_consuming: 12.5 })
|
||||
store.getState().setStep(CrawlStep.finished)
|
||||
|
||||
// Step 4: Set website pages from results
|
||||
store.getState().setWebsitePages(crawledPages)
|
||||
|
||||
// Step 5: Set preview
|
||||
store.getState().setPreviewIndex(1)
|
||||
|
||||
// Verify all state
|
||||
expect(store.getState().currentCredentialId).toBe('crawl-cred-1')
|
||||
expect(store.getState().step).toBe(CrawlStep.finished)
|
||||
expect(store.getState().websitePages).toHaveLength(3)
|
||||
expect(store.getState().crawlResult?.time_consuming).toBe(12.5)
|
||||
expect(store.getState().previewIndex).toBe(1)
|
||||
expect(store.getState().previewWebsitePageRef.current?.source_url).toBe('https://docs.example.com/guide')
|
||||
})
|
||||
|
||||
it('should support a complete online drive navigation workflow', () => {
|
||||
const store = createDataSourceStore()
|
||||
|
||||
// Step 1: Set credential
|
||||
store.getState().setCurrentCredentialId('drive-cred-1')
|
||||
|
||||
// Step 2: Set bucket
|
||||
store.getState().setBucket('company-docs')
|
||||
store.getState().setHasBucket(true)
|
||||
|
||||
// Step 3: Navigate into folders
|
||||
store.getState().setBreadcrumbs(['company-docs'])
|
||||
store.getState().setPrefix(['projects'])
|
||||
const folderFiles = [
|
||||
createOnlineDriveFile('proj-1', 'project-alpha', OnlineDriveFileType.folder),
|
||||
createOnlineDriveFile('proj-2', 'project-beta', OnlineDriveFileType.folder),
|
||||
createOnlineDriveFile('readme', 'README.md', OnlineDriveFileType.file),
|
||||
]
|
||||
store.getState().setOnlineDriveFileList(folderFiles)
|
||||
|
||||
// Step 4: Navigate deeper
|
||||
store.getState().setBreadcrumbs([...store.getState().breadcrumbs, 'project-alpha'])
|
||||
store.getState().setPrefix([...store.getState().prefix, 'project-alpha'])
|
||||
|
||||
// Step 5: Select files
|
||||
store.getState().setOnlineDriveFileList([
|
||||
createOnlineDriveFile('doc-1', 'spec.pdf'),
|
||||
createOnlineDriveFile('doc-2', 'design.fig'),
|
||||
])
|
||||
store.getState().setSelectedFileIds(['doc-1'])
|
||||
|
||||
// Verify full state
|
||||
expect(store.getState().currentCredentialId).toBe('drive-cred-1')
|
||||
expect(store.getState().bucket).toBe('company-docs')
|
||||
expect(store.getState().breadcrumbs).toEqual(['company-docs', 'project-alpha'])
|
||||
expect(store.getState().prefix).toEqual(['projects', 'project-alpha'])
|
||||
expect(store.getState().onlineDriveFileList).toHaveLength(2)
|
||||
expect(store.getState().selectedFileIds).toEqual(['doc-1'])
|
||||
expect(store.getState().previewOnlineDriveFileRef.current?.name).toBe('spec.pdf')
|
||||
})
|
||||
})
|
||||
})
|
||||
301	web/__tests__/datasets/segment-crud.test.tsx	Normal file
@@ -0,0 +1,301 @@
|
||||
/**
|
||||
* Integration Test: Segment CRUD Flow
|
||||
*
|
||||
* Tests segment selection, search/filter, and modal state management across hooks.
|
||||
* Validates cross-hook data contracts in the completed segment module.
|
||||
*/
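// A sketch (assumed behavior, not the hook source) of the debounce contract the first
// test below checks: inputValue updates immediately, searchValue follows roughly 500 ms
// later, and the page resets to 1 when the debounced value lands.
import { useEffect, useState } from 'react'

const useAssumedDebouncedSearch = (onPageChange: (page: number) => void) => {
  const [inputValue, setInputValue] = useState('')
  const [searchValue, setSearchValue] = useState('')

  useEffect(() => {
    if (inputValue === searchValue)
      return
    const timer = setTimeout(() => {
      setSearchValue(inputValue)
      onPageChange(1)
    }, 500)
    return () => clearTimeout(timer)
  }, [inputValue, searchValue, onPageChange])

  return { inputValue, searchValue, handleInputChange: setInputValue }
}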
|
||||
|
||||
import type { SegmentDetailModel } from '@/models/datasets'
|
||||
import { act, renderHook } from '@testing-library/react'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { useModalState } from '@/app/components/datasets/documents/detail/completed/hooks/use-modal-state'
|
||||
import { useSearchFilter } from '@/app/components/datasets/documents/detail/completed/hooks/use-search-filter'
|
||||
import { useSegmentSelection } from '@/app/components/datasets/documents/detail/completed/hooks/use-segment-selection'
|
||||
|
||||
const createSegment = (id: string, content = 'Test segment content'): SegmentDetailModel => ({
|
||||
id,
|
||||
position: 1,
|
||||
document_id: 'doc-1',
|
||||
content,
|
||||
sign_content: content,
|
||||
answer: '',
|
||||
word_count: 50,
|
||||
tokens: 25,
|
||||
keywords: ['test'],
|
||||
index_node_id: 'idx-1',
|
||||
index_node_hash: 'hash-1',
|
||||
hit_count: 0,
|
||||
enabled: true,
|
||||
disabled_at: 0,
|
||||
disabled_by: '',
|
||||
status: 'completed',
|
||||
created_by: 'user-1',
|
||||
created_at: Date.now(),
|
||||
indexing_at: Date.now(),
|
||||
completed_at: Date.now(),
|
||||
error: null,
|
||||
stopped_at: 0,
|
||||
updated_at: Date.now(),
|
||||
attachments: [],
|
||||
} as SegmentDetailModel)
|
||||
|
||||
describe('Segment CRUD Flow', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
describe('Search and Filter → Segment List Query', () => {
|
||||
it('should manage search input with debounce', () => {
|
||||
vi.useFakeTimers()
|
||||
const onPageChange = vi.fn()
|
||||
const { result } = renderHook(() => useSearchFilter({ onPageChange }))
|
||||
|
||||
act(() => {
|
||||
result.current.handleInputChange('keyword')
|
||||
})
|
||||
|
||||
expect(result.current.inputValue).toBe('keyword')
|
||||
expect(result.current.searchValue).toBe('')
|
||||
|
||||
act(() => {
|
||||
vi.advanceTimersByTime(500)
|
||||
})
|
||||
expect(result.current.searchValue).toBe('keyword')
|
||||
expect(onPageChange).toHaveBeenCalledWith(1)
|
||||
|
||||
vi.useRealTimers()
|
||||
})
|
||||
|
||||
it('should manage status filter state', () => {
|
||||
const onPageChange = vi.fn()
|
||||
const { result } = renderHook(() => useSearchFilter({ onPageChange }))
|
||||
|
||||
// status value 1 maps to !!1 = true (enabled)
|
||||
act(() => {
|
||||
result.current.onChangeStatus({ value: 1, name: 'enabled' })
|
||||
})
|
||||
// onChangeStatus converts: value === 'all' ? 'all' : !!value
|
||||
expect(result.current.selectedStatus).toBe(true)
|
||||
|
||||
act(() => {
|
||||
result.current.onClearFilter()
|
||||
})
|
||||
expect(result.current.selectedStatus).toBe('all')
|
||||
expect(result.current.inputValue).toBe('')
|
||||
})
|
||||
|
||||
it('should provide status list for filter dropdown', () => {
|
||||
const { result } = renderHook(() => useSearchFilter({ onPageChange: vi.fn() }))
|
||||
expect(result.current.statusList).toBeInstanceOf(Array)
|
||||
expect(result.current.statusList.length).toBe(3) // all, disabled, enabled
|
||||
})
|
||||
|
||||
it('should compute selectDefaultValue based on selectedStatus', () => {
|
||||
const { result } = renderHook(() => useSearchFilter({ onPageChange: vi.fn() }))
|
||||
|
||||
// Initial state: 'all'
|
||||
expect(result.current.selectDefaultValue).toBe('all')
|
||||
|
||||
// Set to enabled (true)
|
||||
act(() => {
|
||||
result.current.onChangeStatus({ value: 1, name: 'enabled' })
|
||||
})
|
||||
expect(result.current.selectDefaultValue).toBe(1)
|
||||
|
||||
// Set to disabled (false)
|
||||
act(() => {
|
||||
result.current.onChangeStatus({ value: 0, name: 'disabled' })
|
||||
})
|
||||
expect(result.current.selectDefaultValue).toBe(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Segment Selection → Batch Operations', () => {
|
||||
const segments = [
|
||||
createSegment('seg-1'),
|
||||
createSegment('seg-2'),
|
||||
createSegment('seg-3'),
|
||||
]
|
||||
|
||||
it('should manage individual segment selection', () => {
|
||||
const { result } = renderHook(() => useSegmentSelection(segments))
|
||||
|
||||
act(() => {
|
||||
result.current.onSelected('seg-1')
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toContain('seg-1')
|
||||
|
||||
act(() => {
|
||||
result.current.onSelected('seg-2')
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toContain('seg-1')
|
||||
expect(result.current.selectedSegmentIds).toContain('seg-2')
|
||||
expect(result.current.selectedSegmentIds).toHaveLength(2)
|
||||
})
|
||||
|
||||
it('should toggle selection on repeated click', () => {
|
||||
const { result } = renderHook(() => useSegmentSelection(segments))
|
||||
|
||||
act(() => {
|
||||
result.current.onSelected('seg-1')
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toContain('seg-1')
|
||||
|
||||
act(() => {
|
||||
result.current.onSelected('seg-1')
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).not.toContain('seg-1')
|
||||
})
|
||||
|
||||
it('should support select all toggle', () => {
|
||||
const { result } = renderHook(() => useSegmentSelection(segments))
|
||||
|
||||
act(() => {
|
||||
result.current.onSelectedAll()
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toHaveLength(3)
|
||||
expect(result.current.isAllSelected).toBe(true)
|
||||
|
||||
act(() => {
|
||||
result.current.onSelectedAll()
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toHaveLength(0)
|
||||
expect(result.current.isAllSelected).toBe(false)
|
||||
})
|
||||
|
||||
it('should detect partial selection via isSomeSelected', () => {
|
||||
const { result } = renderHook(() => useSegmentSelection(segments))
|
||||
|
||||
act(() => {
|
||||
result.current.onSelected('seg-1')
|
||||
})
|
||||
|
||||
// After selecting one of three, isSomeSelected should be true
|
||||
expect(result.current.selectedSegmentIds).toEqual(['seg-1'])
|
||||
expect(result.current.isSomeSelected).toBe(true)
|
||||
expect(result.current.isAllSelected).toBe(false)
|
||||
})
|
||||
|
||||
it('should clear selection via onCancelBatchOperation', () => {
|
||||
const { result } = renderHook(() => useSegmentSelection(segments))
|
||||
|
||||
act(() => {
|
||||
result.current.onSelected('seg-1')
|
||||
result.current.onSelected('seg-2')
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toHaveLength(2)
|
||||
|
||||
act(() => {
|
||||
result.current.onCancelBatchOperation()
|
||||
})
|
||||
expect(result.current.selectedSegmentIds).toHaveLength(0)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Modal State Management', () => {
|
||||
const onNewSegmentModalChange = vi.fn()
|
||||
|
||||
it('should open segment detail modal on card click', () => {
|
||||
const { result } = renderHook(() => useModalState({ onNewSegmentModalChange }))
|
||||
|
||||
const segment = createSegment('seg-detail-1', 'Detail content')
|
||||
act(() => {
|
||||
result.current.onClickCard(segment)
|
||||
})
|
||||
expect(result.current.currSegment.showModal).toBe(true)
|
||||
expect(result.current.currSegment.segInfo).toBeDefined()
|
||||
expect(result.current.currSegment.segInfo!.id).toBe('seg-detail-1')
|
||||
})
|
||||
|
||||
it('should close segment detail modal', () => {
|
||||
const { result } = renderHook(() => useModalState({ onNewSegmentModalChange }))
|
||||
|
||||
const segment = createSegment('seg-1')
|
||||
act(() => {
|
||||
result.current.onClickCard(segment)
|
||||
})
|
||||
expect(result.current.currSegment.showModal).toBe(true)
|
||||
|
||||
act(() => {
|
||||
result.current.onCloseSegmentDetail()
|
||||
})
|
||||
expect(result.current.currSegment.showModal).toBe(false)
|
||||
})
|
||||
|
||||
it('should manage full screen toggle', () => {
|
||||
const { result } = renderHook(() => useModalState({ onNewSegmentModalChange }))
|
||||
|
||||
expect(result.current.fullScreen).toBe(false)
|
||||
act(() => {
|
||||
result.current.toggleFullScreen()
|
||||
})
|
||||
expect(result.current.fullScreen).toBe(true)
|
||||
act(() => {
|
||||
result.current.toggleFullScreen()
|
||||
})
|
||||
expect(result.current.fullScreen).toBe(false)
|
||||
})
|
||||
|
||||
it('should manage collapsed state', () => {
|
||||
const { result } = renderHook(() => useModalState({ onNewSegmentModalChange }))
|
||||
|
||||
expect(result.current.isCollapsed).toBe(true)
|
||||
act(() => {
|
||||
result.current.toggleCollapsed()
|
||||
})
|
||||
expect(result.current.isCollapsed).toBe(false)
|
||||
})
|
||||
|
||||
it('should manage new child segment modal', () => {
|
||||
const { result } = renderHook(() => useModalState({ onNewSegmentModalChange }))
|
||||
|
||||
expect(result.current.showNewChildSegmentModal).toBe(false)
|
||||
act(() => {
|
||||
result.current.handleAddNewChildChunk('chunk-parent-1')
|
||||
})
|
||||
expect(result.current.showNewChildSegmentModal).toBe(true)
|
||||
expect(result.current.currChunkId).toBe('chunk-parent-1')
|
||||
|
||||
act(() => {
|
||||
result.current.onCloseNewChildChunkModal()
|
||||
})
|
||||
expect(result.current.showNewChildSegmentModal).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('Cross-Hook Data Flow: Search → Selection → Modal', () => {
|
||||
it('should maintain independent state across all three hooks', () => {
|
||||
const segments = [createSegment('seg-1'), createSegment('seg-2')]
|
||||
|
||||
const { result: filterResult } = renderHook(() =>
|
||||
useSearchFilter({ onPageChange: vi.fn() }),
|
||||
)
|
||||
const { result: selectionResult } = renderHook(() =>
|
||||
useSegmentSelection(segments),
|
||||
)
|
||||
const { result: modalResult } = renderHook(() =>
|
||||
useModalState({ onNewSegmentModalChange: vi.fn() }),
|
||||
)
|
||||
|
||||
// Set search filter to enabled
|
||||
act(() => {
|
||||
filterResult.current.onChangeStatus({ value: 1, name: 'enabled' })
|
||||
})
|
||||
|
||||
// Select a segment
|
||||
act(() => {
|
||||
selectionResult.current.onSelected('seg-1')
|
||||
})
|
||||
|
||||
// Open detail modal
|
||||
act(() => {
|
||||
modalResult.current.onClickCard(segments[0])
|
||||
})
|
||||
|
||||
// All states should be independent
|
||||
expect(filterResult.current.selectedStatus).toBe(true) // !!1
|
||||
expect(selectionResult.current.selectedSegmentIds).toContain('seg-1')
|
||||
expect(modalResult.current.currSegment.showModal).toBe(true)
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,261 +0,0 @@
|
||||
/**
|
||||
* MAX_PARALLEL_LIMIT Configuration Bug Test
|
||||
*
|
||||
 * This test reproduces and verifies the fix for issue #23083:
 * MAX_PARALLEL_LIMIT environment variable does not take effect in iteration panel
 */

import { render, screen } from '@testing-library/react'
import * as React from 'react'

// Mock environment variables before importing constants
const originalEnv = process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT

// Test with different environment values
function setupEnvironment(value?: string) {
  if (value)
    process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = value
  else
    delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT

  // Clear module cache to force re-evaluation
  vi.resetModules()
}

function restoreEnvironment() {
  if (originalEnv)
    process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = originalEnv
  else
    delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT

  vi.resetModules()
}

// Mock i18next with proper implementation
vi.mock('react-i18next', () => ({
  useTranslation: () => ({
    t: (key: string) => {
      if (key.includes('MaxParallelismTitle'))
        return 'Max Parallelism'
      if (key.includes('MaxParallelismDesc'))
        return 'Maximum number of parallel executions'
      if (key.includes('parallelMode'))
        return 'Parallel Mode'
      if (key.includes('parallelPanelDesc'))
        return 'Enable parallel execution'
      if (key.includes('errorResponseMethod'))
        return 'Error Response Method'
      return key
    },
  }),
  initReactI18next: {
    type: '3rdParty',
    init: vi.fn(),
  },
}))

// Mock i18next module completely to prevent initialization issues
vi.mock('i18next', () => ({
  use: vi.fn().mockReturnThis(),
  init: vi.fn().mockReturnThis(),
  t: vi.fn(key => key),
  isInitialized: true,
}))

// Mock the useConfig hook
vi.mock('@/app/components/workflow/nodes/iteration/use-config', () => ({
  default: () => ({
    inputs: {
      is_parallel: true,
      parallel_nums: 5,
      error_handle_mode: 'terminated',
    },
    changeParallel: vi.fn(),
    changeParallelNums: vi.fn(),
    changeErrorHandleMode: vi.fn(),
  }),
}))

// Mock other components
vi.mock('@/app/components/workflow/nodes/_base/components/variable/var-reference-picker', () => ({
  default: function MockVarReferencePicker() {
    return <div data-testid="var-reference-picker">VarReferencePicker</div>
  },
}))

vi.mock('@/app/components/workflow/nodes/_base/components/split', () => ({
  default: function MockSplit() {
    return <div data-testid="split">Split</div>
  },
}))

vi.mock('@/app/components/workflow/nodes/_base/components/field', () => ({
  default: function MockField({ title, children }: { title: string, children: React.ReactNode }) {
    return (
      <div data-testid="field">
        <label>{title}</label>
        {children}
      </div>
    )
  },
}))

const getParallelControls = () => ({
  numberInput: screen.getByRole('spinbutton'),
  slider: screen.getByRole('slider'),
})

describe('MAX_PARALLEL_LIMIT Configuration Bug', () => {
  const mockNodeData = {
    id: 'test-iteration-node',
    type: 'iteration' as const,
    data: {
      title: 'Test Iteration',
      desc: 'Test iteration node',
      iterator_selector: ['test'],
      output_selector: ['output'],
      is_parallel: true,
      parallel_nums: 5,
      error_handle_mode: 'terminated' as const,
    },
  }

  beforeEach(() => {
    vi.clearAllMocks()
  })

  afterEach(() => {
    restoreEnvironment()
  })

  afterAll(() => {
    restoreEnvironment()
  })

  describe('Environment Variable Parsing', () => {
    it('should parse MAX_PARALLEL_LIMIT from NEXT_PUBLIC_MAX_PARALLEL_LIMIT environment variable', async () => {
      setupEnvironment('25')
      const { MAX_PARALLEL_LIMIT } = await import('@/config')
      expect(MAX_PARALLEL_LIMIT).toBe(25)
    })

    it('should fallback to default when environment variable is not set', async () => {
      setupEnvironment() // No environment variable
      const { MAX_PARALLEL_LIMIT } = await import('@/config')
      expect(MAX_PARALLEL_LIMIT).toBe(10)
    })

    it('should handle invalid environment variable values', async () => {
      setupEnvironment('invalid')
      const { MAX_PARALLEL_LIMIT } = await import('@/config')

      // Should fall back to default when parsing fails
      expect(MAX_PARALLEL_LIMIT).toBe(10)
    })

    it('should handle empty environment variable', async () => {
      setupEnvironment('')
      const { MAX_PARALLEL_LIMIT } = await import('@/config')

      // Should fall back to default when empty
      expect(MAX_PARALLEL_LIMIT).toBe(10)
    })

    // Edge cases for boundary values
    it('should clamp MAX_PARALLEL_LIMIT to MIN when env is 0 or negative', async () => {
      setupEnvironment('0')
      let { MAX_PARALLEL_LIMIT } = await import('@/config')
      expect(MAX_PARALLEL_LIMIT).toBe(10) // Falls back to default

      setupEnvironment('-5')
      ;({ MAX_PARALLEL_LIMIT } = await import('@/config'))
      expect(MAX_PARALLEL_LIMIT).toBe(10) // Falls back to default
    })

    it('should handle float numbers by parseInt behavior', async () => {
      setupEnvironment('12.7')
      const { MAX_PARALLEL_LIMIT } = await import('@/config')
      // parseInt truncates to integer
      expect(MAX_PARALLEL_LIMIT).toBe(12)
    })
  })

  describe('UI Component Integration (Main Fix Verification)', () => {
    it('should render iteration panel with environment-configured max value', async () => {
      // Set environment variable to a different value
      setupEnvironment('30')

      // Import Panel after setting environment
      const Panel = await import('@/app/components/workflow/nodes/iteration/panel').then(mod => mod.default)
      const { MAX_PARALLEL_LIMIT } = await import('@/config')

      render(
        <Panel
          id="test-node"
          // @ts-expect-error key type mismatch
          data={mockNodeData.data}
        />,
      )

      // Behavior-focused assertion: UI max should equal MAX_PARALLEL_LIMIT
      const { numberInput, slider } = getParallelControls()
      expect(numberInput).toHaveAttribute('max', String(MAX_PARALLEL_LIMIT))
      expect(slider).toHaveAttribute('aria-valuemax', String(MAX_PARALLEL_LIMIT))

      // Verify the actual values
      expect(MAX_PARALLEL_LIMIT).toBe(30)
      expect(numberInput.getAttribute('max')).toBe('30')
      expect(slider.getAttribute('aria-valuemax')).toBe('30')
    })

    it('should maintain UI consistency with different environment values', async () => {
      setupEnvironment('15')
      const Panel = await import('@/app/components/workflow/nodes/iteration/panel').then(mod => mod.default)
      const { MAX_PARALLEL_LIMIT } = await import('@/config')

      render(
        <Panel
          id="test-node"
          // @ts-expect-error key type mismatch
          data={mockNodeData.data}
        />,
      )

      // Both input and slider should use the same max value from MAX_PARALLEL_LIMIT
      const { numberInput, slider } = getParallelControls()

      expect(numberInput.getAttribute('max')).toBe(slider.getAttribute('aria-valuemax'))
      expect(numberInput.getAttribute('max')).toBe(String(MAX_PARALLEL_LIMIT))
    })
  })

  describe('Legacy Constant Verification (For Transition Period)', () => {
    // Marked as transition/deprecation tests
    it('should maintain MAX_ITERATION_PARALLEL_NUM for backward compatibility', async () => {
      const { MAX_ITERATION_PARALLEL_NUM } = await import('@/app/components/workflow/constants')
      expect(typeof MAX_ITERATION_PARALLEL_NUM).toBe('number')
      expect(MAX_ITERATION_PARALLEL_NUM).toBe(10) // Hardcoded legacy value
    })

    it('should demonstrate MAX_PARALLEL_LIMIT vs legacy constant difference', async () => {
      setupEnvironment('50')
      const { MAX_PARALLEL_LIMIT } = await import('@/config')
      const { MAX_ITERATION_PARALLEL_NUM } = await import('@/app/components/workflow/constants')

      // MAX_PARALLEL_LIMIT is configurable, MAX_ITERATION_PARALLEL_NUM is not
      expect(MAX_PARALLEL_LIMIT).toBe(50)
      expect(MAX_ITERATION_PARALLEL_NUM).toBe(10)
      expect(MAX_PARALLEL_LIMIT).not.toBe(MAX_ITERATION_PARALLEL_NUM)
    })
  })

  describe('Constants Validation', () => {
    it('should validate that required constants exist and have correct types', async () => {
      const { MAX_PARALLEL_LIMIT } = await import('@/config')
      const { MIN_ITERATION_PARALLEL_NUM } = await import('@/app/components/workflow/constants')
      expect(typeof MAX_PARALLEL_LIMIT).toBe('number')
      expect(typeof MIN_ITERATION_PARALLEL_NUM).toBe('number')
      expect(MAX_PARALLEL_LIMIT).toBeGreaterThanOrEqual(MIN_ITERATION_PARALLEL_NUM)
    })
  })
})
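For context, a minimal sketch of the parsing these tests exercise, assuming MAX_PARALLEL_LIMIT in @/config is derived from NEXT_PUBLIC_MAX_PARALLEL_LIMIT roughly as follows (illustrative only; the actual code in @/config may differ):

// Sketch: fall back to 10 when the variable is missing, empty, non-numeric, or not positive.
const parsedLimit = Number.parseInt(process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT || '', 10)
export const MAX_PARALLEL_LIMIT = Number.isFinite(parsedLimit) && parsedLimit > 0 ? parsedLimit : 10

This is consistent with the assertions above: '25' yields 25, '12.7' truncates to 12, and unset, empty, 'invalid', '0', and '-5' all fall back to 10.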
@@ -3,7 +3,6 @@ import type { CSSProperties, ReactNode } from 'react'
import { cva } from 'class-variance-authority'
import * as React from 'react'
import { cn } from '@/utils/classnames'
import './index.css'

enum BadgeState {
  Warning = 'warning',
@@ -8,6 +8,7 @@ import { UserActionButtonType } from '@/app/components/workflow/nodes/human-inpu
import 'dayjs/locale/en'
import 'dayjs/locale/zh-cn'
import 'dayjs/locale/ja'
import 'dayjs/locale/nl'

dayjs.extend(utc)
dayjs.extend(relativeTime)
@@ -45,6 +46,7 @@ const localeMap: Record<string, string> = {
  'en-US': 'en',
  'zh-Hans': 'zh-cn',
  'ja-JP': 'ja',
  'nl-NL': 'nl',
}

export const getRelativeTime = (
@@ -162,8 +162,10 @@ describe('useEmbeddedChatbot', () => {
    await waitFor(() => {
      expect(mockFetchChatList).toHaveBeenCalledWith('conversation-1', AppSourceType.webApp, 'app-1')
    })
    expect(result.current.pinnedConversationList).toEqual(pinnedData.data)
    expect(result.current.conversationList).toEqual(listData.data)
    await waitFor(() => {
      expect(result.current.pinnedConversationList).toEqual(pinnedData.data)
      expect(result.current.conversationList).toEqual(listData.data)
    })
  })
})
@@ -98,7 +98,9 @@ const VoiceParamConfig = ({
        className="h-full w-full cursor-pointer rounded-lg border-0 bg-components-input-bg-normal py-1.5 pl-3 pr-10 focus-visible:bg-state-base-hover focus-visible:outline-none group-hover:bg-state-base-hover sm:text-sm sm:leading-6"
      >
        <span className={cn('block truncate text-left text-text-secondary', !languageItem?.name && 'text-text-tertiary')}>
          {languageItem?.name ? t(`voice.language.${replace(languageItem?.value, '-', '')}`, { ns: 'common' }) : localLanguagePlaceholder}
          {languageItem?.name
            ? t(`voice.language.${replace(languageItem?.value ?? '', '-', '')}`, languageItem?.name, { ns: 'common' as const })
            : localLanguagePlaceholder}
        </span>
        <span className="pointer-events-none absolute inset-y-0 right-0 flex items-center pr-2">
          <ChevronDownIcon
@@ -129,7 +131,7 @@ const VoiceParamConfig = ({
            <span
              className={cn('block', selected && 'font-normal')}
            >
              {t(`voice.language.${replace((item.value), '-', '')}`, { ns: 'common' })}
              {t(`voice.language.${replace((item.value), '-', '')}`, item.name, { ns: 'common' as const })}
            </span>
            {(selected || item.value === text2speech?.language) && (
              <span
@@ -1,5 +1,5 @@
import type { RemixiconComponentType } from '@remixicon/react'
import { z } from 'zod'
import * as z from 'zod'

export const InputTypeEnum = z.enum([
  'text-input',
@@ -1,6 +1,6 @@
import type { ZodNumber, ZodSchema, ZodString } from 'zod'
import type { BaseConfiguration } from './types'
import { z } from 'zod'
import * as z from 'zod'
import { BaseFieldType } from './types'

export const generateZodSchema = (fields: BaseConfiguration[]) => {
@@ -1,4 +1,4 @@
import { z } from 'zod'
import * as z from 'zod'

const ContactMethod = z.union([
  z.literal('email'),
@@ -22,10 +22,10 @@ export const UserSchema = z.object({
    .min(3, 'Surname must be at least 3 characters long')
    .regex(/^[A-Z]/, 'Surname must start with a capital letter'),
  isAcceptingTerms: z.boolean().refine(val => val, {
    message: 'You must accept the terms and conditions',
    error: 'You must accept the terms and conditions',
  }),
  contact: z.object({
    email: z.string().email('Invalid email address'),
    email: z.email('Invalid email address'),
    phone: z.string().optional(),
    preferredContactMethod: ContactMethod,
  }),
@@ -1,6 +1,6 @@
import type { ZodSchema, ZodString } from 'zod'
import type { InputFieldConfiguration } from './types'
import { z } from 'zod'
import * as z from 'zod'
import { SupportedFileTypes, TransferMethod } from '@/app/components/rag-pipeline/components/panel/input-field/editor/form/schema'
import { InputFieldType } from './types'
@@ -2,6 +2,7 @@
import type { FC } from 'react'
import * as React from 'react'
import { useTranslation } from 'react-i18next'
import { env } from '@/env'
import ParamItem from '.'

type Props = {
@@ -11,12 +12,7 @@ type Props = {
  enable: boolean
}

const maxTopK = (() => {
  const configValue = Number.parseInt(globalThis.document?.body?.getAttribute('data-public-top-k-max-value') || '', 10)
  if (configValue && !isNaN(configValue))
    return configValue
  return 10
})()
const maxTopK = env.NEXT_PUBLIC_TOP_K_MAX_VALUE
const VALUE_LIMIT = {
  default: 2,
  step: 1,
@@ -4,7 +4,6 @@ import { cva } from 'class-variance-authority'
import * as React from 'react'
import { Highlight } from '@/app/components/base/icons/src/public/common'
import { cn } from '@/utils/classnames'
import './index.css'

const PremiumBadgeVariants = cva(
  'premium-badge',
@@ -1,6 +1,6 @@
import { render, screen } from '@testing-library/react'
import { noop } from 'es-toolkit/function'
import { z } from 'zod'
import * as z from 'zod'
import withValidation from '.'

describe('withValidation HOC', () => {
@@ -1,5 +1,5 @@
import type { Meta, StoryObj } from '@storybook/nextjs-vite'
import { z } from 'zod'
import * as z from 'zod'
import withValidation from '.'

// Sample components to wrap with validation
@@ -65,7 +65,7 @@ const ProductCard = ({ name, price, category, inStock }: ProductCardProps) => {
// Create validated versions
const userSchema = z.object({
  name: z.string().min(1, 'Name is required'),
  email: z.string().email('Invalid email'),
  email: z.email('Invalid email'),
  age: z.number().min(0).max(150),
})

@@ -371,7 +371,7 @@ export const ConfigurationValidation: Story = {
  )

  const configSchema = z.object({
    apiUrl: z.string().url('Must be valid URL'),
    apiUrl: z.url('Must be valid URL'),
    timeout: z.number().min(0).max(30000),
    retries: z.number().min(0).max(5),
    debug: z.boolean(),
@@ -430,7 +430,7 @@ export const UsageDocumentation: Story = {
      <div>
        <h4 className="mb-2 text-sm font-semibold text-gray-900">Usage Example</h4>
        <pre className="overflow-x-auto rounded-lg bg-gray-900 p-4 text-xs text-gray-100">
          {`import { z } from 'zod'
          {`import * as z from 'zod'
import withValidation from './withValidation'

// Define your component
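The zod changes in the hunks above all follow one migration pattern: a namespace import, top-level string format validators, and an error field in place of message. A minimal sketch of the target style, combining the idioms shown (illustrative only, not taken from the repository):

import * as z from 'zod'

// z.email()/z.url() replace z.string().email()/z.string().url(),
// and `error` replaces `message` in refinements.
const exampleSchema = z.object({
  email: z.email('Invalid email address'),
  apiUrl: z.url('Must be valid URL'),
  isAcceptingTerms: z.boolean().refine(val => val, { error: 'You must accept the terms and conditions' }),
})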
309
web/app/components/datasets/__tests__/chunk.spec.tsx
Normal file
@@ -0,0 +1,309 @@
import type { QA } from '@/models/datasets'
import { render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { ChunkContainer, ChunkLabel, QAPreview } from '../chunk'

vi.mock('../../base/icons/src/public/knowledge', () => ({
  SelectionMod: (props: React.ComponentProps<'svg'>) => (
    <svg data-testid="selection-mod-icon" {...props} />
  ),
}))

function createQA(overrides: Partial<QA> = {}): QA {
  return {
    question: 'What is Dify?',
    answer: 'Dify is an open-source LLM app development platform.',
    ...overrides,
  }
}

describe('ChunkLabel', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  describe('Rendering', () => {
    it('should render the label text', () => {
      render(<ChunkLabel label="Chunk #1" characterCount={100} />)

      expect(screen.getByText('Chunk #1')).toBeInTheDocument()
    })

    it('should render the character count with unit', () => {
      render(<ChunkLabel label="Chunk #1" characterCount={256} />)

      expect(screen.getByText('256 characters')).toBeInTheDocument()
    })

    it('should render the SelectionMod icon', () => {
      render(<ChunkLabel label="Chunk" characterCount={10} />)

      expect(screen.getByTestId('selection-mod-icon')).toBeInTheDocument()
    })

    it('should render a middle dot separator between label and count', () => {
      render(<ChunkLabel label="Chunk" characterCount={10} />)

      expect(screen.getByText('·')).toBeInTheDocument()
    })
  })

  describe('Props', () => {
    it('should display zero character count', () => {
      render(<ChunkLabel label="Empty Chunk" characterCount={0} />)

      expect(screen.getByText('0 characters')).toBeInTheDocument()
    })

    it('should display large character counts', () => {
      render(<ChunkLabel label="Large" characterCount={999999} />)

      expect(screen.getByText('999999 characters')).toBeInTheDocument()
    })
  })

  describe('Edge Cases', () => {
    it('should render with empty label', () => {
      render(<ChunkLabel label="" characterCount={50} />)

      expect(screen.getByText('50 characters')).toBeInTheDocument()
    })

    it('should render with special characters in label', () => {
      render(<ChunkLabel label="Chunk <#1> & 'test'" characterCount={10} />)

      expect(screen.getByText('Chunk <#1> & \'test\'')).toBeInTheDocument()
    })
  })
})

// Tests for ChunkContainer - wraps ChunkLabel with children content area
describe('ChunkContainer', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  describe('Rendering', () => {
    it('should render ChunkLabel with correct props', () => {
      render(
        <ChunkContainer label="Chunk #1" characterCount={200}>
          Content here
        </ChunkContainer>,
      )

      expect(screen.getByText('Chunk #1')).toBeInTheDocument()
      expect(screen.getByText('200 characters')).toBeInTheDocument()
    })

    it('should render children in the content area', () => {
      render(
        <ChunkContainer label="Chunk" characterCount={50}>
          <p>Paragraph content</p>
        </ChunkContainer>,
      )

      expect(screen.getByText('Paragraph content')).toBeInTheDocument()
    })

    it('should render the SelectionMod icon via ChunkLabel', () => {
      render(
        <ChunkContainer label="Chunk" characterCount={10}>
          Content
        </ChunkContainer>,
      )

      expect(screen.getByTestId('selection-mod-icon')).toBeInTheDocument()
    })
  })

  describe('Structure', () => {
    it('should have space-y-2 on the outer container', () => {
      const { container } = render(
        <ChunkContainer label="Chunk" characterCount={10}>Content</ChunkContainer>,
      )

      expect(container.firstElementChild).toHaveClass('space-y-2')
    })

    it('should render children inside a styled content div', () => {
      render(
        <ChunkContainer label="Chunk" characterCount={10}>
          <span>Test child</span>
        </ChunkContainer>,
      )

      const contentDiv = screen.getByText('Test child').parentElement
      expect(contentDiv).toHaveClass('body-md-regular', 'text-text-secondary')
    })
  })

  describe('Edge Cases', () => {
    it('should render without children', () => {
      const { container } = render(
        <ChunkContainer label="Empty" characterCount={0} />,
      )

      expect(container.firstElementChild).toBeInTheDocument()
      expect(screen.getByText('Empty')).toBeInTheDocument()
    })

    it('should render multiple children', () => {
      render(
        <ChunkContainer label="Multi" characterCount={100}>
          <span>First</span>
          <span>Second</span>
        </ChunkContainer>,
      )

      expect(screen.getByText('First')).toBeInTheDocument()
      expect(screen.getByText('Second')).toBeInTheDocument()
    })

    it('should render with string children', () => {
      render(
        <ChunkContainer label="Text" characterCount={5}>
          Plain text content
        </ChunkContainer>,
      )

      expect(screen.getByText('Plain text content')).toBeInTheDocument()
    })
  })
})

// Tests for QAPreview - displays question and answer pair
describe('QAPreview', () => {
  beforeEach(() => {
    vi.clearAllMocks()
  })

  describe('Rendering', () => {
    it('should render the question text', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      expect(screen.getByText('What is Dify?')).toBeInTheDocument()
    })

    it('should render the answer text', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      expect(screen.getByText('Dify is an open-source LLM app development platform.')).toBeInTheDocument()
    })

    it('should render Q and A labels', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      expect(screen.getByText('Q')).toBeInTheDocument()
      expect(screen.getByText('A')).toBeInTheDocument()
    })
  })

  describe('Structure', () => {
    it('should render Q label as a label element', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      const qLabel = screen.getByText('Q')
      expect(qLabel.tagName).toBe('LABEL')
    })

    it('should render A label as a label element', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      const aLabel = screen.getByText('A')
      expect(aLabel.tagName).toBe('LABEL')
    })

    it('should render question in a p element', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      const questionEl = screen.getByText(qa.question)
      expect(questionEl.tagName).toBe('P')
    })

    it('should render answer in a p element', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      const answerEl = screen.getByText(qa.answer)
      expect(answerEl.tagName).toBe('P')
    })

    it('should have the outer container with flex column layout', () => {
      const qa = createQA()
      const { container } = render(<QAPreview qa={qa} />)

      expect(container.firstElementChild).toHaveClass('flex', 'flex-col', 'gap-y-2')
    })

    it('should apply text styling classes to question paragraph', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      const questionEl = screen.getByText(qa.question)
      expect(questionEl).toHaveClass('body-md-regular', 'text-text-secondary')
    })

    it('should apply text styling classes to answer paragraph', () => {
      const qa = createQA()
      render(<QAPreview qa={qa} />)

      const answerEl = screen.getByText(qa.answer)
      expect(answerEl).toHaveClass('body-md-regular', 'text-text-secondary')
    })
  })

  describe('Edge Cases', () => {
    it('should render with empty question', () => {
      const qa = createQA({ question: '' })
      render(<QAPreview qa={qa} />)

      expect(screen.getByText('Q')).toBeInTheDocument()
      expect(screen.getByText('A')).toBeInTheDocument()
    })

    it('should render with empty answer', () => {
      const qa = createQA({ answer: '' })
      render(<QAPreview qa={qa} />)

      expect(screen.getByText('Q')).toBeInTheDocument()
      expect(screen.getByText(qa.question)).toBeInTheDocument()
    })

    it('should render with long text', () => {
      const longText = 'x'.repeat(1000)
      const qa = createQA({ question: longText, answer: longText })
      render(<QAPreview qa={qa} />)

      const elements = screen.getAllByText(longText)
      expect(elements).toHaveLength(2)
    })

    it('should render with special characters in question and answer', () => {
      const qa = createQA({
        question: 'What about <html> & "quotes"?',
        answer: 'It handles \'single\' & "double" quotes.',
      })
      render(<QAPreview qa={qa} />)

      expect(screen.getByText('What about <html> & "quotes"?')).toBeInTheDocument()
      expect(screen.getByText('It handles \'single\' & "double" quotes.')).toBeInTheDocument()
    })

    it('should render with multiline text', () => {
      const qa = createQA({
        question: 'Line1\nLine2',
        answer: 'Answer1\nAnswer2',
      })
      render(<QAPreview qa={qa} />)

      expect(screen.getByText(/Line1/)).toBeInTheDocument()
      expect(screen.getByText(/Answer1/)).toBeInTheDocument()
    })
  })
})
@@ -1,6 +1,6 @@
|
||||
import { cleanup, render } from '@testing-library/react'
|
||||
import { afterEach, describe, expect, it } from 'vitest'
|
||||
import DatasetsLoading from './loading'
|
||||
import DatasetsLoading from '../loading'
|
||||
|
||||
afterEach(() => {
|
||||
cleanup()
|
||||
@@ -1,13 +1,6 @@
|
||||
import { cleanup, render, screen } from '@testing-library/react'
|
||||
import { afterEach, describe, expect, it, vi } from 'vitest'
|
||||
import NoLinkedAppsPanel from './no-linked-apps-panel'
|
||||
|
||||
// Mock react-i18next
|
||||
vi.mock('react-i18next', () => ({
|
||||
useTranslation: () => ({
|
||||
t: (key: string) => key,
|
||||
}),
|
||||
}))
|
||||
import NoLinkedAppsPanel from '../no-linked-apps-panel'
|
||||
|
||||
// Mock useDocLink
|
||||
vi.mock('@/context/i18n', () => ({
|
||||
@@ -21,17 +14,17 @@ afterEach(() => {
|
||||
describe('NoLinkedAppsPanel', () => {
|
||||
it('should render without crashing', () => {
|
||||
render(<NoLinkedAppsPanel />)
|
||||
expect(screen.getByText('datasetMenus.emptyTip')).toBeInTheDocument()
|
||||
expect(screen.getByText('common.datasetMenus.emptyTip')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render the empty tip text', () => {
|
||||
render(<NoLinkedAppsPanel />)
|
||||
expect(screen.getByText('datasetMenus.emptyTip')).toBeInTheDocument()
|
||||
expect(screen.getByText('common.datasetMenus.emptyTip')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render the view doc link', () => {
|
||||
render(<NoLinkedAppsPanel />)
|
||||
expect(screen.getByText('datasetMenus.viewDoc')).toBeInTheDocument()
|
||||
expect(screen.getByText('common.datasetMenus.viewDoc')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render link with correct href', () => {
|
||||
@@ -1,6 +1,6 @@
|
||||
import { cleanup, render, screen } from '@testing-library/react'
|
||||
import { afterEach, describe, expect, it } from 'vitest'
|
||||
import ApiIndex from './index'
|
||||
import ApiIndex from '../index'
|
||||
|
||||
afterEach(() => {
|
||||
cleanup()
|
||||
@@ -1,111 +0,0 @@
|
||||
import { cleanup, render, screen } from '@testing-library/react'
|
||||
import { afterEach, describe, expect, it } from 'vitest'
|
||||
import { ChunkContainer, ChunkLabel, QAPreview } from './chunk'
|
||||
|
||||
afterEach(() => {
|
||||
cleanup()
|
||||
})
|
||||
|
||||
describe('ChunkLabel', () => {
|
||||
it('should render label text', () => {
|
||||
render(<ChunkLabel label="Chunk 1" characterCount={100} />)
|
||||
expect(screen.getByText('Chunk 1')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render character count', () => {
|
||||
render(<ChunkLabel label="Chunk 1" characterCount={150} />)
|
||||
expect(screen.getByText('150 characters')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render separator dot', () => {
|
||||
render(<ChunkLabel label="Chunk 1" characterCount={100} />)
|
||||
expect(screen.getByText('·')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render with zero character count', () => {
|
||||
render(<ChunkLabel label="Empty Chunk" characterCount={0} />)
|
||||
expect(screen.getByText('0 characters')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render with large character count', () => {
|
||||
render(<ChunkLabel label="Large Chunk" characterCount={999999} />)
|
||||
expect(screen.getByText('999999 characters')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
describe('ChunkContainer', () => {
|
||||
it('should render label and character count', () => {
|
||||
render(<ChunkContainer label="Container 1" characterCount={200}>Content</ChunkContainer>)
|
||||
expect(screen.getByText('Container 1')).toBeInTheDocument()
|
||||
expect(screen.getByText('200 characters')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render children content', () => {
|
||||
render(<ChunkContainer label="Container 1" characterCount={200}>Test Content</ChunkContainer>)
|
||||
expect(screen.getByText('Test Content')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render with complex children', () => {
|
||||
render(
|
||||
<ChunkContainer label="Container" characterCount={100}>
|
||||
<div data-testid="child-div">
|
||||
<span>Nested content</span>
|
||||
</div>
|
||||
</ChunkContainer>,
|
||||
)
|
||||
expect(screen.getByTestId('child-div')).toBeInTheDocument()
|
||||
expect(screen.getByText('Nested content')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render empty children', () => {
|
||||
render(<ChunkContainer label="Empty" characterCount={0}>{null}</ChunkContainer>)
|
||||
expect(screen.getByText('Empty')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
describe('QAPreview', () => {
|
||||
const mockQA = {
|
||||
question: 'What is the meaning of life?',
|
||||
answer: 'The meaning of life is 42.',
|
||||
}
|
||||
|
||||
it('should render question text', () => {
|
||||
render(<QAPreview qa={mockQA} />)
|
||||
expect(screen.getByText('What is the meaning of life?')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render answer text', () => {
|
||||
render(<QAPreview qa={mockQA} />)
|
||||
expect(screen.getByText('The meaning of life is 42.')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render Q label', () => {
|
||||
render(<QAPreview qa={mockQA} />)
|
||||
expect(screen.getByText('Q')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render A label', () => {
|
||||
render(<QAPreview qa={mockQA} />)
|
||||
expect(screen.getByText('A')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render with empty strings', () => {
|
||||
render(<QAPreview qa={{ question: '', answer: '' }} />)
|
||||
expect(screen.getByText('Q')).toBeInTheDocument()
|
||||
expect(screen.getByText('A')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render with long text', () => {
|
||||
const longQuestion = 'Q'.repeat(500)
|
||||
const longAnswer = 'A'.repeat(500)
|
||||
render(<QAPreview qa={{ question: longQuestion, answer: longAnswer }} />)
|
||||
expect(screen.getByText(longQuestion)).toBeInTheDocument()
|
||||
expect(screen.getByText(longAnswer)).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render with special characters', () => {
|
||||
render(<QAPreview qa={{ question: 'What about <script>?', answer: '& special chars!' }} />)
|
||||
expect(screen.getByText('What about <script>?')).toBeInTheDocument()
|
||||
expect(screen.getByText('& special chars!')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
@@ -4,7 +4,7 @@ import { describe, expect, it } from 'vitest'
|
||||
import { ConfigurationMethodEnum, ModelStatusEnum, ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations'
|
||||
import { RerankingModeEnum } from '@/models/datasets'
|
||||
import { RETRIEVE_METHOD } from '@/types/app'
|
||||
import { ensureRerankModelSelected, isReRankModelSelected } from './check-rerank-model'
|
||||
import { ensureRerankModelSelected, isReRankModelSelected } from '../check-rerank-model'
|
||||
|
||||
// Test data factory
|
||||
const createRetrievalConfig = (overrides: Partial<RetrievalConfig> = {}): RetrievalConfig => ({
|
||||
@@ -1,6 +1,6 @@
|
||||
import { render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import ChunkingModeLabel from './chunking-mode-label'
|
||||
import ChunkingModeLabel from '../chunking-mode-label'
|
||||
|
||||
describe('ChunkingModeLabel', () => {
|
||||
describe('Rendering', () => {
|
||||
@@ -1,6 +1,6 @@
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import { CredentialIcon } from './credential-icon'
|
||||
import { CredentialIcon } from '../credential-icon'
|
||||
|
||||
describe('CredentialIcon', () => {
|
||||
describe('Rendering', () => {
|
||||
@@ -1,6 +1,6 @@
|
||||
import { render } from '@testing-library/react'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import DocumentFileIcon from './document-file-icon'
|
||||
import DocumentFileIcon from '../document-file-icon'
|
||||
|
||||
describe('DocumentFileIcon', () => {
|
||||
describe('Rendering', () => {
|
||||
@@ -0,0 +1,49 @@
|
||||
import type { DocumentItem } from '@/models/datasets'
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import DocumentList from '../document-list'
|
||||
|
||||
vi.mock('../../document-file-icon', () => ({
|
||||
default: ({ name, extension }: { name?: string, extension?: string }) => (
|
||||
<span data-testid="file-icon">
|
||||
{name}
|
||||
.
|
||||
{extension}
|
||||
</span>
|
||||
),
|
||||
}))
|
||||
|
||||
describe('DocumentList', () => {
|
||||
const mockList = [
|
||||
{ id: 'doc-1', name: 'report', extension: 'pdf' },
|
||||
{ id: 'doc-2', name: 'data', extension: 'csv' },
|
||||
] as DocumentItem[]
|
||||
|
||||
const onChange = vi.fn()
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
it('should render all documents', () => {
|
||||
render(<DocumentList list={mockList} onChange={onChange} />)
|
||||
expect(screen.getByText('report')).toBeInTheDocument()
|
||||
expect(screen.getByText('data')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render file icons', () => {
|
||||
render(<DocumentList list={mockList} onChange={onChange} />)
|
||||
expect(screen.getAllByTestId('file-icon')).toHaveLength(2)
|
||||
})
|
||||
|
||||
it('should call onChange with document on click', () => {
|
||||
render(<DocumentList list={mockList} onChange={onChange} />)
|
||||
fireEvent.click(screen.getByText('report'))
|
||||
expect(onChange).toHaveBeenCalledWith(mockList[0])
|
||||
})
|
||||
|
||||
it('should render empty list without errors', () => {
|
||||
const { container } = render(<DocumentList list={[]} onChange={onChange} />)
|
||||
expect(container.firstChild).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
@@ -3,7 +3,7 @@ import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import * as React from 'react'
|
||||
import { ChunkingMode, DataSourceType } from '@/models/datasets'
|
||||
import DocumentPicker from './index'
|
||||
import DocumentPicker from '../index'
|
||||
|
||||
// Mock portal-to-follow-elem - always render content for testing
|
||||
vi.mock('@/app/components/base/portal-to-follow-elem', () => ({
|
||||
@@ -52,25 +52,6 @@ vi.mock('@/service/knowledge/use-document', () => ({
|
||||
useDocumentList: mockUseDocumentList,
|
||||
}))
|
||||
|
||||
// Mock icons - mock all remixicon components used in the component tree
|
||||
vi.mock('@remixicon/react', () => ({
|
||||
RiArrowDownSLine: () => <span data-testid="arrow-icon">↓</span>,
|
||||
RiFile3Fill: () => <span data-testid="file-icon">📄</span>,
|
||||
RiFileCodeFill: () => <span data-testid="file-code-icon">📄</span>,
|
||||
RiFileExcelFill: () => <span data-testid="file-excel-icon">📄</span>,
|
||||
RiFileGifFill: () => <span data-testid="file-gif-icon">📄</span>,
|
||||
RiFileImageFill: () => <span data-testid="file-image-icon">📄</span>,
|
||||
RiFileMusicFill: () => <span data-testid="file-music-icon">📄</span>,
|
||||
RiFilePdf2Fill: () => <span data-testid="file-pdf-icon">📄</span>,
|
||||
RiFilePpt2Fill: () => <span data-testid="file-ppt-icon">📄</span>,
|
||||
RiFileTextFill: () => <span data-testid="file-text-icon">📄</span>,
|
||||
RiFileVideoFill: () => <span data-testid="file-video-icon">📄</span>,
|
||||
RiFileWordFill: () => <span data-testid="file-word-icon">📄</span>,
|
||||
RiMarkdownFill: () => <span data-testid="file-markdown-icon">📄</span>,
|
||||
RiSearchLine: () => <span data-testid="search-icon">🔍</span>,
|
||||
RiCloseLine: () => <span data-testid="close-icon">✕</span>,
|
||||
}))
|
||||
|
||||
// Factory function to create mock SimpleDocumentDetail
|
||||
const createMockDocument = (overrides: Partial<SimpleDocumentDetail> = {}): SimpleDocumentDetail => ({
|
||||
id: `doc-${Math.random().toString(36).substr(2, 9)}`,
|
||||
@@ -211,12 +192,6 @@ describe('DocumentPicker', () => {
|
||||
expect(screen.getByText('--')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render arrow icon', () => {
|
||||
renderComponent()
|
||||
|
||||
expect(screen.getByTestId('arrow-icon')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render general mode label', () => {
|
||||
renderComponent({
|
||||
value: {
|
||||
@@ -473,7 +448,7 @@ describe('DocumentPicker', () => {
|
||||
describe('Memoization Logic', () => {
|
||||
it('should be wrapped with React.memo', () => {
|
||||
// React.memo components have a $$typeof property
|
||||
expect((DocumentPicker as any).$$typeof).toBeDefined()
|
||||
expect((DocumentPicker as unknown as { $$typeof: symbol }).$$typeof).toBeDefined()
|
||||
})
|
||||
|
||||
it('should compute parentModeLabel correctly with useMemo', () => {
|
||||
@@ -952,7 +927,6 @@ describe('DocumentPicker', () => {
|
||||
|
||||
renderComponent({ onChange })
|
||||
|
||||
// Click on a document in the list
|
||||
fireEvent.click(screen.getByText('Document 2'))
|
||||
|
||||
// handleChange should find the document and call onChange with full document
|
||||
@@ -1026,8 +1000,9 @@ describe('DocumentPicker', () => {
|
||||
},
|
||||
})
|
||||
|
||||
// FileIcon should be rendered via DocumentFileIcon - pdf renders pdf icon
|
||||
expect(screen.getByTestId('file-pdf-icon')).toBeInTheDocument()
|
||||
// FileIcon should render an SVG icon for the file extension
|
||||
const trigger = screen.getByTestId('portal-trigger')
|
||||
expect(trigger.querySelector('svg')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,20 +1,7 @@
|
||||
import type { DocumentItem } from '@/models/datasets'
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import * as React from 'react'
|
||||
import PreviewDocumentPicker from './preview-document-picker'
|
||||
|
||||
// Override shared i18n mock for custom translations
|
||||
vi.mock('react-i18next', () => ({
|
||||
useTranslation: () => ({
|
||||
t: (key: string, params?: Record<string, unknown>) => {
|
||||
if (key === 'preprocessDocument' && params?.num)
|
||||
return `${params.num} files`
|
||||
|
||||
const prefix = params?.ns ? `${params.ns}.` : ''
|
||||
return `${prefix}${key}`
|
||||
},
|
||||
}),
|
||||
}))
|
||||
import PreviewDocumentPicker from '../preview-document-picker'
|
||||
|
||||
// Mock portal-to-follow-elem - always render content for testing
|
||||
vi.mock('@/app/components/base/portal-to-follow-elem', () => ({
|
||||
@@ -45,23 +32,6 @@ vi.mock('@/app/components/base/portal-to-follow-elem', () => ({
|
||||
),
|
||||
}))
|
||||
|
||||
// Mock icons
|
||||
vi.mock('@remixicon/react', () => ({
|
||||
RiArrowDownSLine: () => <span data-testid="arrow-icon">↓</span>,
|
||||
RiFile3Fill: () => <span data-testid="file-icon">📄</span>,
|
||||
RiFileCodeFill: () => <span data-testid="file-code-icon">📄</span>,
|
||||
RiFileExcelFill: () => <span data-testid="file-excel-icon">📄</span>,
|
||||
RiFileGifFill: () => <span data-testid="file-gif-icon">📄</span>,
|
||||
RiFileImageFill: () => <span data-testid="file-image-icon">📄</span>,
|
||||
RiFileMusicFill: () => <span data-testid="file-music-icon">📄</span>,
|
||||
RiFilePdf2Fill: () => <span data-testid="file-pdf-icon">📄</span>,
|
||||
RiFilePpt2Fill: () => <span data-testid="file-ppt-icon">📄</span>,
|
||||
RiFileTextFill: () => <span data-testid="file-text-icon">📄</span>,
|
||||
RiFileVideoFill: () => <span data-testid="file-video-icon">📄</span>,
|
||||
RiFileWordFill: () => <span data-testid="file-word-icon">📄</span>,
|
||||
RiMarkdownFill: () => <span data-testid="file-markdown-icon">📄</span>,
|
||||
}))
|
||||
|
||||
// Factory function to create mock DocumentItem
|
||||
const createMockDocumentItem = (overrides: Partial<DocumentItem> = {}): DocumentItem => ({
|
||||
id: `doc-${Math.random().toString(36).substr(2, 9)}`,
|
||||
@@ -134,19 +104,14 @@ describe('PreviewDocumentPicker', () => {
|
||||
expect(screen.getByText('--')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render arrow icon', () => {
|
||||
renderComponent()
|
||||
|
||||
expect(screen.getByTestId('arrow-icon')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render file icon', () => {
|
||||
renderComponent({
|
||||
value: createMockDocumentItem({ extension: 'txt' }),
|
||||
files: [], // Use empty files to avoid duplicate icons
|
||||
})
|
||||
|
||||
expect(screen.getByTestId('file-text-icon')).toBeInTheDocument()
|
||||
const trigger = screen.getByTestId('portal-trigger')
|
||||
expect(trigger.querySelector('svg')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
it('should render pdf icon for pdf extension', () => {
|
||||
@@ -155,7 +120,8 @@ describe('PreviewDocumentPicker', () => {
|
||||
files: [], // Use empty files to avoid duplicate icons
|
||||
})
|
||||
|
||||
expect(screen.getByTestId('file-pdf-icon')).toBeInTheDocument()
|
||||
const trigger = screen.getByTestId('portal-trigger')
|
||||
expect(trigger.querySelector('svg')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -206,7 +172,8 @@ describe('PreviewDocumentPicker', () => {
|
||||
value: createMockDocumentItem({ name: 'test.docx', extension: 'docx' }),
|
||||
})
|
||||
|
||||
expect(screen.getByTestId('file-word-icon')).toBeInTheDocument()
|
||||
const trigger = screen.getByTestId('portal-trigger')
|
||||
expect(trigger.querySelector('svg')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -282,7 +249,7 @@ describe('PreviewDocumentPicker', () => {
|
||||
// Tests for component memoization
|
||||
describe('Component Memoization', () => {
|
||||
it('should be wrapped with React.memo', () => {
|
||||
expect((PreviewDocumentPicker as any).$$typeof).toBeDefined()
|
||||
expect((PreviewDocumentPicker as unknown as { $$typeof: symbol }).$$typeof).toBeDefined()
|
||||
})
|
||||
|
||||
it('should not re-render when props are the same', () => {
|
||||
@@ -329,7 +296,6 @@ describe('PreviewDocumentPicker', () => {
|
||||
|
||||
renderComponent({ files, onChange })
|
||||
|
||||
// Click on a document
|
||||
fireEvent.click(screen.getByText('Document 2'))
|
||||
|
||||
// handleChange should call onChange with the selected item
|
||||
@@ -506,21 +472,16 @@ describe('PreviewDocumentPicker', () => {
|
||||
})
|
||||
|
||||
describe('extension variations', () => {
|
||||
const extensions = [
|
||||
{ ext: 'txt', icon: 'file-text-icon' },
|
||||
{ ext: 'pdf', icon: 'file-pdf-icon' },
|
||||
{ ext: 'docx', icon: 'file-word-icon' },
|
||||
{ ext: 'xlsx', icon: 'file-excel-icon' },
|
||||
{ ext: 'md', icon: 'file-markdown-icon' },
|
||||
]
|
||||
const extensions = ['txt', 'pdf', 'docx', 'xlsx', 'md']
|
||||
|
||||
it.each(extensions)('should render correct icon for $ext extension', ({ ext, icon }) => {
|
||||
it.each(extensions)('should render icon for %s extension', (ext) => {
|
||||
renderComponent({
|
||||
value: createMockDocumentItem({ extension: ext }),
|
||||
files: [], // Use empty files to avoid duplicate icons
|
||||
})
|
||||
|
||||
expect(screen.getByTestId(icon)).toBeInTheDocument()
|
||||
const trigger = screen.getByTestId('portal-trigger')
|
||||
expect(trigger.querySelector('svg')).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -543,7 +504,6 @@ describe('PreviewDocumentPicker', () => {
|
||||
|
||||
renderComponent({ files, onChange })
|
||||
|
||||
// Click on first document
|
||||
fireEvent.click(screen.getByText('Document 1'))
|
||||
|
||||
expect(onChange).toHaveBeenCalledWith(files[0])
|
||||
@@ -568,7 +528,7 @@ describe('PreviewDocumentPicker', () => {
|
||||
onChange={vi.fn()}
|
||||
/>,
|
||||
)
|
||||
expect(screen.getByText('3 files')).toBeInTheDocument()
|
||||
expect(screen.getByText(/dataset\.preprocessDocument/)).toBeInTheDocument()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -609,7 +569,6 @@ describe('PreviewDocumentPicker', () => {
|
||||
|
||||
renderComponent({ files, onChange })
|
||||
|
||||
// Click first document
|
||||
fireEvent.click(screen.getByText('Document 1'))
|
||||
|
||||
expect(onChange).toHaveBeenCalledWith(files[0])
|
||||
@@ -624,11 +583,9 @@ describe('PreviewDocumentPicker', () => {
|
||||
|
||||
renderComponent({ files: customFiles, onChange })
|
||||
|
||||
// Click on first custom file
|
||||
fireEvent.click(screen.getByText('Custom File 1'))
|
||||
expect(onChange).toHaveBeenCalledWith(customFiles[0])
|
||||
|
||||
// Click on second custom file
|
||||
fireEvent.click(screen.getByText('Custom File 2'))
|
||||
expect(onChange).toHaveBeenCalledWith(customFiles[1])
|
||||
})
|
||||
@@ -3,7 +3,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import Toast from '@/app/components/base/toast'
|
||||
|
||||
import { useAutoDisabledDocuments } from '@/service/knowledge/use-document'
|
||||
import AutoDisabledDocument from './auto-disabled-document'
|
||||
import AutoDisabledDocument from '../auto-disabled-document'
|
||||
|
||||
type AutoDisabledDocumentsResponse = { document_ids: string[] }
|
||||
|
||||
@@ -15,7 +15,6 @@ const createMockQueryResult = (
|
||||
isLoading,
|
||||
}) as ReturnType<typeof useAutoDisabledDocuments>
|
||||
|
||||
// Mock service hooks
|
||||
const mockMutateAsync = vi.fn()
|
||||
const mockInvalidDisabledDocument = vi.fn()
|
||||
|
||||
@@ -27,7 +26,6 @@ vi.mock('@/service/knowledge/use-document', () => ({
|
||||
useInvalidDisabledDocument: vi.fn(() => mockInvalidDisabledDocument),
|
||||
}))
|
||||
|
||||
// Mock Toast
|
||||
vi.mock('@/app/components/base/toast', () => ({
|
||||
default: {
|
||||
notify: vi.fn(),
|
||||
@@ -3,9 +3,8 @@ import { cleanup, fireEvent, render, screen, waitFor } from '@testing-library/re
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { retryErrorDocs } from '@/service/datasets'
|
||||
import { useDatasetErrorDocs } from '@/service/knowledge/use-dataset'
|
||||
import RetryButton from './index-failed'
|
||||
import RetryButton from '../index-failed'
|
||||
|
||||
// Mock service hooks
|
||||
const mockRefetch = vi.fn()
|
||||
|
||||
vi.mock('@/service/knowledge/use-dataset', () => ({
|
||||
@@ -1,6 +1,6 @@
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import StatusWithAction from './status-with-action'
|
||||
import StatusWithAction from '../status-with-action'
|
||||
|
||||
describe('StatusWithAction', () => {
|
||||
describe('Rendering', () => {
|
||||
@@ -1,9 +1,8 @@
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { RETRIEVE_METHOD } from '@/types/app'
|
||||
import EconomicalRetrievalMethodConfig from './index'
|
||||
import EconomicalRetrievalMethodConfig from '../index'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('../../settings/option-card', () => ({
|
||||
vi.mock('../../../settings/option-card', () => ({
|
||||
default: ({ children, title, description, disabled, id }: {
|
||||
children?: React.ReactNode
|
||||
title?: string
|
||||
@@ -18,7 +17,7 @@ vi.mock('../../settings/option-card', () => ({
|
||||
),
|
||||
}))
|
||||
|
||||
vi.mock('../retrieval-param-config', () => ({
|
||||
vi.mock('../../retrieval-param-config', () => ({
|
||||
default: ({ value, onChange, type }: {
|
||||
value: Record<string, unknown>
|
||||
onChange: (value: Record<string, unknown>) => void
|
||||
@@ -1,6 +1,6 @@
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import ImageList from './index'
|
||||
import ImageList from '../index'
|
||||
|
||||
// Track handleImageClick calls for testing
|
||||
type FileEntity = {
|
||||
@@ -43,7 +43,7 @@ type ImageInfo = {
|
||||
}
|
||||
|
||||
// Mock ImagePreviewer since it uses createPortal
|
||||
vi.mock('../image-previewer', () => ({
|
||||
vi.mock('../../image-previewer', () => ({
|
||||
default: ({ images, initialIndex, onClose }: ImagePreviewerProps) => (
|
||||
<div data-testid="image-previewer">
|
||||
<span data-testid="preview-count">{images.length}</span>
|
||||
@@ -132,7 +132,6 @@ describe('ImageList', () => {
|
||||
const images = createMockImages(15)
|
||||
render(<ImageList images={images} size="md" limit={9} />)
|
||||
|
||||
// Click More button
|
||||
const moreButton = screen.getByText(/\+6/)
|
||||
fireEvent.click(moreButton)
|
||||
|
||||
@@ -182,7 +181,6 @@ describe('ImageList', () => {
|
||||
const images = createMockImages(3)
|
||||
const { rerender } = render(<ImageList images={images} size="md" />)
|
||||
|
||||
// Click first image to open preview
|
||||
const firstThumb = screen.getByTestId('file-thumb-https://example.com/image-1.png')
|
||||
fireEvent.click(firstThumb)
|
||||
|
||||
@@ -197,7 +195,6 @@ describe('ImageList', () => {
|
||||
const newImages = createMockImages(2) // Only 2 images
|
||||
rerender(<ImageList images={newImages} size="md" />)
|
||||
|
||||
// Click on a thumbnail that exists
|
||||
const validThumb = screen.getByTestId('file-thumb-https://example.com/image-1.png')
|
||||
fireEvent.click(validThumb)
|
||||
expect(screen.getByTestId('image-previewer')).toBeInTheDocument()
|
||||
@@ -1,6 +1,6 @@
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import More from './more'
|
||||
import More from '../more'
|
||||
|
||||
describe('More', () => {
|
||||
describe('Rendering', () => {
|
||||
@@ -1,6 +1,6 @@
|
||||
import { act, fireEvent, render, screen, waitFor } from '@testing-library/react'
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import ImagePreviewer from './index'
|
||||
import ImagePreviewer from '../index'
|
||||
|
||||
// Mock fetch
|
||||
const mockFetch = vi.fn()
|
||||
@@ -12,7 +12,6 @@ const mockCreateObjectURL = vi.fn(() => 'blob:mock-url')
|
||||
globalThis.URL.revokeObjectURL = mockRevokeObjectURL
|
||||
globalThis.URL.createObjectURL = mockCreateObjectURL
|
||||
|
||||
// Mock Image
|
||||
class MockImage {
|
||||
onload: (() => void) | null = null
|
||||
onerror: (() => void) | null = null
|
||||
@@ -294,7 +293,6 @@ describe('ImagePreviewer', () => {
|
||||
expect(screen.getByText('image1.png')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
// Click prev button multiple times - should stay at first image
|
||||
const buttons = document.querySelectorAll('button')
|
||||
const prevButton = Array.from(buttons).find(btn =>
|
||||
btn.className.includes('left-8'),
|
||||
@@ -325,7 +323,6 @@ describe('ImagePreviewer', () => {
|
||||
expect(screen.getByText('image3.png')).toBeInTheDocument()
|
||||
})
|
||||
|
||||
// Click next button multiple times - should stay at last image
|
||||
const buttons = document.querySelectorAll('button')
|
||||
const nextButton = Array.from(buttons).find(btn =>
|
||||
btn.className.includes('right-8'),
|
||||
@@ -372,7 +369,6 @@ describe('ImagePreviewer', () => {
|
||||
expect(screen.getByText(/Failed to load image/)).toBeInTheDocument()
|
||||
})
|
||||
|
||||
// Click retry button
|
||||
const retryButton = document.querySelector('button.rounded-full')
|
||||
if (retryButton) {
|
||||
await act(async () => {
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { FileEntity } from './types'
|
||||
import type { FileEntity } from '../types'
|
||||
import { act, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import {
|
||||
@@ -6,7 +6,7 @@ import {
|
||||
FileContextProvider,
|
||||
useFileStore,
|
||||
useFileStoreWithSelector,
|
||||
} from './store'
|
||||
} from '../store'
|
||||
|
||||
const createMockFile = (id: string): FileEntity => ({
|
||||
id,
|
||||
@@ -1,12 +1,12 @@
|
||||
import type { FileEntity } from './types'
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileUploadConfigResponse } from '@/models/common'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import {
|
||||
DEFAULT_IMAGE_FILE_BATCH_LIMIT,
|
||||
DEFAULT_IMAGE_FILE_SIZE_LIMIT,
|
||||
DEFAULT_SINGLE_CHUNK_ATTACHMENT_LIMIT,
|
||||
} from './constants'
|
||||
import { fileIsUploaded, getFileType, getFileUploadConfig, traverseFileEntry } from './utils'
|
||||
} from '../constants'
|
||||
import { fileIsUploaded, getFileType, getFileUploadConfig, traverseFileEntry } from '../utils'
|
||||
|
||||
describe('image-uploader utils', () => {
|
||||
describe('getFileType', () => {
|
||||
@@ -1,13 +1,12 @@
|
||||
import type { PropsWithChildren } from 'react'
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileEntity } from '../../types'
|
||||
import { act, fireEvent, render, renderHook, screen, waitFor } from '@testing-library/react'
|
||||
import * as React from 'react'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import Toast from '@/app/components/base/toast'
|
||||
import { FileContextProvider } from '../store'
|
||||
import { useUpload } from './use-upload'
|
||||
import { FileContextProvider } from '../../store'
|
||||
import { useUpload } from '../use-upload'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/service/use-common', () => ({
|
||||
useFileUploadConfig: vi.fn(() => ({
|
||||
data: {
|
||||
@@ -1,9 +1,8 @@
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import { FileContextProvider } from '../store'
|
||||
import ImageInput from './image-input'
|
||||
import { FileContextProvider } from '../../store'
|
||||
import ImageInput from '../image-input'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/service/use-common', () => ({
|
||||
useFileUploadConfig: vi.fn(() => ({
|
||||
data: {
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileEntity } from '../../types'
|
||||
import { fireEvent, render } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import ImageItem from './image-item'
|
||||
import ImageItem from '../image-item'
|
||||
|
||||
const createMockFile = (overrides: Partial<FileEntity> = {}): FileEntity => ({
|
||||
id: 'test-id',
|
||||
@@ -1,9 +1,8 @@
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileEntity } from '../../types'
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import ImageUploaderInChunkWrapper from './index'
|
||||
import ImageUploaderInChunkWrapper from '../index'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/service/use-common', () => ({
|
||||
useFileUploadConfig: vi.fn(() => ({
|
||||
data: {
|
||||
@@ -1,10 +1,9 @@
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileEntity } from '../../types'
|
||||
import { fireEvent, render } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import { FileContextProvider } from '../store'
|
||||
import ImageInput from './image-input'
|
||||
import { FileContextProvider } from '../../store'
|
||||
import ImageInput from '../image-input'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/service/use-common', () => ({
|
||||
useFileUploadConfig: vi.fn(() => ({
|
||||
data: {
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileEntity } from '../../types'
|
||||
import { fireEvent, render } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import ImageItem from './image-item'
|
||||
import ImageItem from '../image-item'
|
||||
|
||||
const createMockFile = (overrides: Partial<FileEntity> = {}): FileEntity => ({
|
||||
id: 'test-id',
|
||||
@@ -1,9 +1,8 @@
|
||||
import type { FileEntity } from '../types'
|
||||
import type { FileEntity } from '../../types'
|
||||
import { fireEvent, render, screen } from '@testing-library/react'
|
||||
import { describe, expect, it, vi } from 'vitest'
|
||||
import ImageUploaderInRetrievalTestingWrapper from './index'
|
||||
import ImageUploaderInRetrievalTestingWrapper from '../index'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@/service/use-common', () => ({
|
||||
useFileUploadConfig: vi.fn(() => ({
|
||||
data: {
|
||||
@@ -7,7 +7,7 @@ import {
WeightedScoreEnum,
} from '@/models/datasets'
import { RETRIEVE_METHOD } from '@/types/app'
import RetrievalMethodConfig from './index'
import RetrievalMethodConfig from '../index'

// Mock provider context with controllable supportRetrievalMethods
let mockSupportRetrievalMethods: RETRIEVE_METHOD[] = [
@@ -37,7 +37,7 @@ vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', ()
}))

// Mock child component RetrievalParamConfig to simplify testing
vi.mock('../retrieval-param-config', () => ({
vi.mock('../../retrieval-param-config', () => ({
default: ({ type, value, onChange, showMultiModalTip }: {
type: RETRIEVE_METHOD
value: RetrievalConfig
@@ -585,7 +585,7 @@ describe('RetrievalMethodConfig', () => {
// Verify the component is wrapped with React.memo by checking its displayName or type
expect(RetrievalMethodConfig).toBeDefined()
// React.memo components have a $$typeof property
expect((RetrievalMethodConfig as any).$$typeof).toBeDefined()
expect((RetrievalMethodConfig as unknown as { $$typeof: symbol }).$$typeof).toBeDefined()
})

it('should not re-render when props are the same', () => {
@@ -1,10 +1,10 @@
import type { ReactNode } from 'react'
import { render, screen } from '@testing-library/react'
import { RETRIEVE_METHOD } from '@/types/app'
import { retrievalIcon } from '../../create/icons'
import RetrievalMethodInfo, { getIcon } from './index'
import { retrievalIcon } from '../../../create/icons'
import RetrievalMethodInfo, { getIcon } from '../index'

// Mock next/image
// Override global next/image auto-mock: tests assert on rendered <img> src attributes via data-testid
vi.mock('next/image', () => ({
default: ({ src, alt, className }: { src: string, alt: string, className?: string }) => (
<img src={src} alt={alt || ''} className={className} data-testid="method-icon" />
@@ -24,7 +24,7 @@ vi.mock('@/app/components/base/radio-card', () => ({
}))

// Mock icons
vi.mock('../../create/icons', () => ({
vi.mock('../../../create/icons', () => ({
retrievalIcon: {
vector: 'vector-icon.png',
fullText: 'fulltext-icon.png',
@@ -2,13 +2,7 @@ import type { RetrievalConfig } from '@/types/app'
import { fireEvent, render, screen } from '@testing-library/react'
import { RerankingModeEnum, WeightedScoreEnum } from '@/models/datasets'
import { RETRIEVE_METHOD } from '@/types/app'
import RetrievalParamConfig from './index'

vi.mock('react-i18next', () => ({
useTranslation: () => ({
t: (key: string) => key,
}),
}))
import RetrievalParamConfig from '../index'

const mockNotify = vi.fn()
vi.mock('@/app/components/base/toast', () => ({
@@ -268,7 +262,7 @@ describe('RetrievalParamConfig', () => {

expect(mockNotify).toHaveBeenCalledWith({
type: 'error',
message: 'errorMsg.rerankModelRequired',
message: 'workflow.errorMsg.rerankModelRequired',
})
})

@@ -358,7 +352,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.getByText('form.retrievalSetting.multiModalTip')).toBeInTheDocument()
expect(screen.getByText('datasetSettings.form.retrievalSetting.multiModalTip')).toBeInTheDocument()
})

it('should not show multimodal tip when showMultiModalTip is false', () => {
@@ -372,7 +366,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.queryByText('form.retrievalSetting.multiModalTip')).not.toBeInTheDocument()
expect(screen.queryByText('datasetSettings.form.retrievalSetting.multiModalTip')).not.toBeInTheDocument()
})
})

@@ -505,7 +499,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.getByText('weightedScore.title')).toBeInTheDocument()
expect(screen.getByText('dataset.weightedScore.title')).toBeInTheDocument()
})

it('should have RerankingModel option', () => {
@@ -517,7 +511,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.getByText('modelProvider.rerankModel.key')).toBeInTheDocument()
expect(screen.getByText('common.modelProvider.rerankModel.key')).toBeInTheDocument()
})

it('should show model selector when RerankingModel mode is selected', () => {
@@ -570,7 +564,7 @@ describe('RetrievalParamConfig', () => {
)

const radioCards = screen.getAllByTestId('radio-card')
const weightedScoreCard = radioCards.find(card => card.getAttribute('data-title') === 'weightedScore.title')
const weightedScoreCard = radioCards.find(card => card.getAttribute('data-title') === 'dataset.weightedScore.title')
fireEvent.click(weightedScoreCard!)

expect(mockOnChange).toHaveBeenCalled()
@@ -589,7 +583,7 @@ describe('RetrievalParamConfig', () => {
)

const radioCards = screen.getAllByTestId('radio-card')
const rerankModelCard = radioCards.find(card => card.getAttribute('data-title') === 'modelProvider.rerankModel.key')
const rerankModelCard = radioCards.find(card => card.getAttribute('data-title') === 'common.modelProvider.rerankModel.key')
fireEvent.click(rerankModelCard!)

expect(mockOnChange).not.toHaveBeenCalled()
@@ -621,12 +615,12 @@ describe('RetrievalParamConfig', () => {
)

const radioCards = screen.getAllByTestId('radio-card')
const rerankModelCard = radioCards.find(card => card.getAttribute('data-title') === 'modelProvider.rerankModel.key')
const rerankModelCard = radioCards.find(card => card.getAttribute('data-title') === 'common.modelProvider.rerankModel.key')
fireEvent.click(rerankModelCard!)

expect(mockNotify).toHaveBeenCalledWith({
type: 'error',
message: 'errorMsg.rerankModelRequired',
message: 'workflow.errorMsg.rerankModelRequired',
})
})

@@ -736,7 +730,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.getByText('form.retrievalSetting.multiModalTip')).toBeInTheDocument()
expect(screen.getByText('datasetSettings.form.retrievalSetting.multiModalTip')).toBeInTheDocument()
})

it('should not show multimodal tip for hybrid search with WeightedScore', () => {
@@ -764,7 +758,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.queryByText('form.retrievalSetting.multiModalTip')).not.toBeInTheDocument()
expect(screen.queryByText('datasetSettings.form.retrievalSetting.multiModalTip')).not.toBeInTheDocument()
})

it('should not render rerank switch for hybrid search', () => {
@@ -826,7 +820,7 @@ describe('RetrievalParamConfig', () => {
/>,
)

expect(screen.getByText('modelProvider.rerankModel.key')).toBeInTheDocument()
expect(screen.getByText('common.modelProvider.rerankModel.key')).toBeInTheDocument()
})
})

@@ -846,7 +840,7 @@ describe('RetrievalParamConfig', () => {
)

const radioCards = screen.getAllByTestId('radio-card')
const weightedScoreCard = radioCards.find(card => card.getAttribute('data-title') === 'weightedScore.title')
const weightedScoreCard = radioCards.find(card => card.getAttribute('data-title') === 'dataset.weightedScore.title')
fireEvent.click(weightedScoreCard!)

expect(mockOnChange).toHaveBeenCalled()
@@ -880,7 +874,7 @@ describe('RetrievalParamConfig', () => {
)

const radioCards = screen.getAllByTestId('radio-card')
const weightedScoreCard = radioCards.find(card => card.getAttribute('data-title') === 'weightedScore.title')
const weightedScoreCard = radioCards.find(card => card.getAttribute('data-title') === 'dataset.weightedScore.title')
fireEvent.click(weightedScoreCard!)

expect(mockOnChange).toHaveBeenCalled()
@@ -1,19 +1,17 @@
import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import Footer from './footer'
import Footer from '../footer'

// Configurable mock for search params
let mockSearchParams = new URLSearchParams()
const mockReplace = vi.fn()

// Mock next/navigation
vi.mock('next/navigation', () => ({
useRouter: () => ({ replace: mockReplace }),
useSearchParams: () => mockSearchParams,
}))

// Mock service hook
const mockInvalidDatasetList = vi.fn()
vi.mock('@/service/knowledge/use-dataset', () => ({
useInvalidDatasetList: () => mockInvalidDatasetList,
@@ -23,7 +21,7 @@ vi.mock('@/service/knowledge/use-dataset', () => ({
let capturedActiveTab: string | undefined
let capturedDslUrl: string | undefined

vi.mock('./create-options/create-from-dsl-modal', () => ({
vi.mock('../create-options/create-from-dsl-modal', () => ({
default: ({ show, onClose, onSuccess, activeTab, dslUrl }: {
show: boolean
onClose: () => void
@@ -48,9 +46,7 @@ vi.mock('./create-options/create-from-dsl-modal', () => ({
},
}))

// ============================================================================
// Footer Component Tests
// ============================================================================

describe('Footer', () => {
beforeEach(() => {
@@ -60,9 +56,6 @@ describe('Footer', () => {
capturedDslUrl = undefined
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<Footer />)
@@ -88,9 +81,6 @@ describe('Footer', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should open modal when import button is clicked', () => {
render(<Footer />)
@@ -104,12 +94,10 @@ describe('Footer', () => {
it('should close modal when onClose is called', () => {
render(<Footer />)

// Open modal
const importButton = screen.getByText(/importDSL/i)
fireEvent.click(importButton)
expect(screen.getByTestId('dsl-modal')).toBeInTheDocument()

// Close modal
const closeButton = screen.getByTestId('close-modal')
fireEvent.click(closeButton)
expect(screen.queryByTestId('dsl-modal')).not.toBeInTheDocument()
@@ -118,7 +106,6 @@ describe('Footer', () => {
it('should call invalidDatasetList on success', () => {
render(<Footer />)

// Open modal
const importButton = screen.getByText(/importDSL/i)
fireEvent.click(importButton)

@@ -130,9 +117,6 @@ describe('Footer', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container classes', () => {
const { container } = render(<Footer />)
@@ -147,9 +131,6 @@ describe('Footer', () => {
})
})

// --------------------------------------------------------------------------
// Memoization Tests
// --------------------------------------------------------------------------
describe('Memoization', () => {
it('should be memoized with React.memo', () => {
const { rerender } = render(<Footer />)
@@ -158,9 +139,7 @@ describe('Footer', () => {
})
})

// --------------------------------------------------------------------------
// URL Parameter Tests (Branch Coverage)
// --------------------------------------------------------------------------
describe('URL Parameter Handling', () => {
it('should set activeTab to FROM_URL when dslUrl is present', () => {
mockSearchParams = new URLSearchParams('remoteInstallUrl=https://example.com/dsl')
@@ -193,12 +172,10 @@ describe('Footer', () => {

render(<Footer />)

// Open modal
const importButton = screen.getByText(/importDSL/i)
fireEvent.click(importButton)
expect(screen.getByTestId('dsl-modal')).toBeInTheDocument()

// Close modal
const closeButton = screen.getByTestId('close-modal')
fireEvent.click(closeButton)

@@ -210,11 +187,9 @@ describe('Footer', () => {

render(<Footer />)

// Open modal
const importButton = screen.getByText(/importDSL/i)
fireEvent.click(importButton)

// Close modal
const closeButton = screen.getByTestId('close-modal')
fireEvent.click(closeButton)

@@ -1,15 +1,10 @@
import { render, screen } from '@testing-library/react'
import { describe, expect, it } from 'vitest'
import Header from './header'
import Header from '../header'

// ============================================================================
// Header Component Tests
// ============================================================================

describe('Header', () => {
// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<Header />)
@@ -41,9 +36,6 @@ describe('Header', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container classes', () => {
const { container } = render(<Header />)
@@ -58,9 +50,6 @@ describe('Header', () => {
})
})

// --------------------------------------------------------------------------
// Memoization Tests
// --------------------------------------------------------------------------
describe('Memoization', () => {
it('should be memoized with React.memo', () => {
const { rerender } = render(<Header />)
@@ -1,35 +1,30 @@
import { render, screen } from '@testing-library/react'
import { describe, expect, it, vi } from 'vitest'

import CreateFromPipeline from './index'
import CreateFromPipeline from '../index'

// Mock child components to isolate testing
vi.mock('./header', () => ({
vi.mock('../header', () => ({
default: () => <div data-testid="mock-header">Header</div>,
}))

vi.mock('./list', () => ({
vi.mock('../list', () => ({
default: () => <div data-testid="mock-list">List</div>,
}))

vi.mock('./footer', () => ({
vi.mock('../footer', () => ({
default: () => <div data-testid="mock-footer">Footer</div>,
}))

vi.mock('../../base/effect', () => ({
vi.mock('../../../base/effect', () => ({
default: ({ className }: { className?: string }) => (
<div data-testid="mock-effect" className={className}>Effect</div>
),
}))

// ============================================================================
// CreateFromPipeline Component Tests
// ============================================================================

describe('CreateFromPipeline', () => {
// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<CreateFromPipeline />)
@@ -57,9 +52,6 @@ describe('CreateFromPipeline', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container classes', () => {
const { container } = render(<CreateFromPipeline />)
@@ -86,9 +78,7 @@ describe('CreateFromPipeline', () => {
})
})

// --------------------------------------------------------------------------
// Component Order Tests
// --------------------------------------------------------------------------
describe('Component Order', () => {
it('should render components in correct order', () => {
const { container } = render(<CreateFromPipeline />)
@@ -1,11 +1,9 @@
import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import DSLConfirmModal from './dsl-confirm-modal'
import DSLConfirmModal from '../dsl-confirm-modal'

// ============================================================================
// DSLConfirmModal Component Tests
// ============================================================================

describe('DSLConfirmModal', () => {
const defaultProps = {
@@ -17,9 +15,6 @@ describe('DSLConfirmModal', () => {
vi.clearAllMocks()
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<DSLConfirmModal {...defaultProps} />)
@@ -50,9 +45,7 @@ describe('DSLConfirmModal', () => {
})
})

// --------------------------------------------------------------------------
// Versions Display Tests
// --------------------------------------------------------------------------
describe('Versions Display', () => {
it('should display imported version when provided', () => {
render(
@@ -81,9 +74,6 @@ describe('DSLConfirmModal', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should call onCancel when cancel button is clicked', () => {
render(<DSLConfirmModal {...defaultProps} />)
@@ -114,9 +104,7 @@ describe('DSLConfirmModal', () => {
})
})

// --------------------------------------------------------------------------
// Button State Tests
// --------------------------------------------------------------------------
describe('Button State', () => {
it('should enable confirm button by default', () => {
render(<DSLConfirmModal {...defaultProps} />)
@@ -140,9 +128,6 @@ describe('DSLConfirmModal', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have button container with proper styling', () => {
render(<DSLConfirmModal {...defaultProps} />)
@@ -1,11 +1,9 @@
import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import Header from './header'
import Header from '../header'

// ============================================================================
// Header Component Tests
// ============================================================================

describe('Header', () => {
const defaultProps = {
@@ -16,9 +14,6 @@ describe('Header', () => {
vi.clearAllMocks()
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<Header {...defaultProps} />)
@@ -43,9 +38,6 @@ describe('Header', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should call onClose when close button is clicked', () => {
const { container } = render(<Header {...defaultProps} />)
@@ -57,9 +49,6 @@ describe('Header', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container styling', () => {
const { container } = render(<Header {...defaultProps} />)
@@ -80,9 +69,6 @@ describe('Header', () => {
})
})

// --------------------------------------------------------------------------
// Memoization Tests
// --------------------------------------------------------------------------
describe('Memoization', () => {
it('should be memoized with React.memo', () => {
const { rerender } = render(<Header {...defaultProps} />)
@@ -1,13 +1,12 @@
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
import { act, fireEvent, render, screen, waitFor } from '@testing-library/react'
import DSLConfirmModal from './dsl-confirm-modal'
import Header from './header'
import CreateFromDSLModal, { CreateFromDSLModalTab } from './index'
import Tab from './tab'
import TabItem from './tab/item'
import Uploader from './uploader'
import DSLConfirmModal from '../dsl-confirm-modal'
import Header from '../header'
import CreateFromDSLModal, { CreateFromDSLModalTab } from '../index'
import Tab from '../tab'
import TabItem from '../tab/item'
import Uploader from '../uploader'

// Mock next/navigation
const mockPush = vi.fn()
vi.mock('next/navigation', () => ({
useRouter: () => ({
@@ -15,7 +14,6 @@ vi.mock('next/navigation', () => ({
}),
}))

// Mock service hooks
const mockImportDSL = vi.fn()
const mockImportDSLConfirm = vi.fn()

@@ -37,7 +35,6 @@ vi.mock('@/app/components/workflow/plugin-dependency/hooks', () => ({
}),
}))

// Mock toast context
const mockNotify = vi.fn()

vi.mock('use-context-selector', async () => {
@@ -48,7 +45,6 @@ vi.mock('use-context-selector', async () => {
}
})

// Test data builders
const createMockFile = (name = 'test.pipeline'): File => {
return new File(['test content'], name, { type: 'application/octet-stream' })
}
@@ -88,9 +84,6 @@ describe('CreateFromDSLModal', () => {
mockHandleCheckPluginDependencies.mockReset()
})

// ============================================
// Rendering Tests
// ============================================
describe('Rendering', () => {
it('should render without crashing when show is true', () => {
render(
@@ -172,9 +165,6 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// Props Testing
// ============================================
describe('Props', () => {
it('should use FROM_FILE as default activeTab', () => {
render(
@@ -232,9 +222,6 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// State Management Tests
// ============================================
describe('State Management', () => {
it('should switch between tabs', () => {
render(
@@ -248,7 +235,6 @@ describe('CreateFromDSLModal', () => {
// Initially file tab is active
expect(screen.getByText('app.dslUploader.button')).toBeInTheDocument()

// Click URL tab
fireEvent.click(screen.getByText('app.importFromDSLUrl'))

// URL input should be visible
@@ -317,9 +303,7 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// API Call Tests
// ============================================
describe('API Calls', () => {
it('should call importDSL with URL mode when URL tab is active', async () => {
mockImportDSL.mockResolvedValue(createImportDSLResponse())
@@ -526,9 +510,7 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// Event Handler Tests
// ============================================
describe('Event Handlers', () => {
it('should call onClose when header close button is clicked', () => {
const onClose = vi.fn()
@@ -638,7 +620,6 @@ describe('CreateFromDSLModal', () => {

const importButton = screen.getByText('app.newApp.import').closest('button')!

// Click multiple times rapidly
fireEvent.click(importButton)
fireEvent.click(importButton)
fireEvent.click(importButton)
@@ -650,9 +631,6 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// Memoization Tests
// ============================================
describe('Memoization', () => {
it('should correctly compute buttonDisabled based on currentTab and file/URL', () => {
render(
@@ -684,9 +662,6 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// Edge Cases Tests
// ============================================
describe('Edge Cases', () => {
it('should handle empty URL gracefully', () => {
render(
@@ -842,9 +817,7 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// File Import Tests (covers readFile, handleFile, file mode import)
// ============================================
describe('File Import', () => {
it('should read file content when file is selected', async () => {
mockImportDSL.mockResolvedValue(createImportDSLResponse())
@@ -877,7 +850,6 @@ describe('CreateFromDSLModal', () => {
expect(importButton).not.toBeDisabled()
})

// Click import button
const importButton = screen.getByText('app.newApp.import').closest('button')!
fireEvent.click(importButton)

@@ -927,9 +899,7 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// DSL Confirm Flow Tests (covers onDSLConfirm)
// ============================================
describe('DSL Confirm Flow', () => {
it('should handle DSL confirm success', async () => {
vi.useFakeTimers({ shouldAdvanceTime: true })
@@ -978,7 +948,6 @@ describe('CreateFromDSLModal', () => {
vi.advanceTimersByTime(400)
})

// Click confirm button in error modal
await waitFor(() => {
expect(screen.getByText('app.newApp.Confirm')).toBeInTheDocument()
})
@@ -1027,7 +996,6 @@ describe('CreateFromDSLModal', () => {
vi.advanceTimersByTime(400)
})

// Click confirm - should return early since importId is empty
await waitFor(() => {
expect(screen.getByText('app.newApp.Confirm')).toBeInTheDocument()
})
@@ -1163,7 +1131,6 @@ describe('CreateFromDSLModal', () => {
// There are two Cancel buttons now (one in main modal footer, one in error modal)
// Find the Cancel button in the error modal context
const cancelButtons = screen.getAllByText('app.newApp.Cancel')
// Click the last Cancel button (the one in the error modal)
fireEvent.click(cancelButtons[cancelButtons.length - 1])

vi.useRealTimers()
@@ -1171,9 +1138,7 @@ describe('CreateFromDSLModal', () => {
})
})

// ============================================
// Header Component Tests
// ============================================
describe('Header', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -1206,9 +1171,7 @@ describe('Header', () => {
})
})

// ============================================
// Tab Component Tests
// ============================================
describe('Tab', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -1261,9 +1224,7 @@ describe('Tab', () => {
})
})

// ============================================
// Tab Item Component Tests
// ============================================
describe('TabItem', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -1353,9 +1314,7 @@ describe('TabItem', () => {
})
})

// ============================================
// Uploader Component Tests
// ============================================
describe('Uploader', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -1679,7 +1638,6 @@ describe('Uploader', () => {
// After click, oncancel should be set
})

// Click browse link to trigger selectHandle
const browseLink = screen.getByText('app.dslUploader.browse')
fireEvent.click(browseLink)

@@ -1755,9 +1713,7 @@ describe('Uploader', () => {
})
})

// ============================================
// DSLConfirmModal Component Tests
// ============================================
describe('DSLConfirmModal', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -1923,9 +1879,6 @@ describe('DSLConfirmModal', () => {
})
})

// ============================================
// Integration Tests
// ============================================
describe('CreateFromDSLModal Integration', () => {
beforeEach(() => {
vi.clearAllMocks()
@@ -1958,7 +1911,6 @@ describe('CreateFromDSLModal Integration', () => {
const input = screen.getByPlaceholderText('app.importFromDSLUrlPlaceholder')
fireEvent.change(input, { target: { value: 'https://example.com/pipeline.yaml' } })

// Click import
const importButton = screen.getByText('app.newApp.import').closest('button')!
fireEvent.click(importButton)

@@ -1999,7 +1951,6 @@ describe('CreateFromDSLModal Integration', () => {
const input = screen.getByPlaceholderText('app.importFromDSLUrlPlaceholder')
fireEvent.change(input, { target: { value: 'https://example.com/old-pipeline.yaml' } })

// Click import
const importButton = screen.getByText('app.newApp.import').closest('button')!
fireEvent.click(importButton)

@@ -1,9 +1,8 @@
import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import Uploader from './uploader'
import Uploader from '../uploader'

// Mock ToastContext
const mockNotify = vi.fn()
vi.mock('@/app/components/base/toast', () => ({
ToastContext: {
@@ -17,17 +16,11 @@ vi.mock('use-context-selector', () => ({
useContext: () => ({ notify: mockNotify }),
}))

// ============================================================================
// Test Data Factories
// ============================================================================

const createMockFile = (name = 'test.pipeline', _size = 1024): File => {
return new File(['test content'], name, { type: 'application/octet-stream' })
}

// ============================================================================
// Uploader Component Tests
// ============================================================================

describe('Uploader', () => {
const defaultProps = {
@@ -39,9 +32,7 @@ describe('Uploader', () => {
vi.clearAllMocks()
})

// --------------------------------------------------------------------------
// Rendering Tests - No File
// --------------------------------------------------------------------------
describe('Rendering - No File', () => {
it('should render without crashing', () => {
render(<Uploader {...defaultProps} />)
@@ -78,9 +69,7 @@ describe('Uploader', () => {
})
})

// --------------------------------------------------------------------------
// Rendering Tests - With File
// --------------------------------------------------------------------------
describe('Rendering - With File', () => {
it('should render file name when file is provided', () => {
const file = createMockFile('my-pipeline.pipeline')
@@ -109,9 +98,6 @@ describe('Uploader', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should open file dialog when browse is clicked', () => {
render(<Uploader {...defaultProps} />)
@@ -151,9 +137,7 @@ describe('Uploader', () => {
})
})

// --------------------------------------------------------------------------
// Custom className Tests
// --------------------------------------------------------------------------
describe('Custom className', () => {
it('should apply custom className', () => {
const { container } = render(<Uploader {...defaultProps} className="custom-class" />)
@@ -168,9 +152,6 @@ describe('Uploader', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container styling', () => {
const { container } = render(<Uploader {...defaultProps} />)
@@ -192,9 +173,6 @@ describe('Uploader', () => {
})
})

// --------------------------------------------------------------------------
// Memoization Tests
// --------------------------------------------------------------------------
describe('Memoization', () => {
it('should be memoized with React.memo', () => {
const { rerender } = render(<Uploader {...defaultProps} />)
@@ -2,9 +2,8 @@ import type { ReactNode } from 'react'
import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
import { act, renderHook, waitFor } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { CreateFromDSLModalTab, useDSLImport } from './use-dsl-import'
import { CreateFromDSLModalTab, useDSLImport } from '../use-dsl-import'

// Mock next/navigation
const mockPush = vi.fn()
vi.mock('next/navigation', () => ({
useRouter: () => ({
@@ -12,7 +11,6 @@ vi.mock('next/navigation', () => ({
}),
}))

// Mock service hooks
const mockImportDSL = vi.fn()
const mockImportDSLConfirm = vi.fn()

@@ -34,7 +32,6 @@ vi.mock('@/app/components/workflow/plugin-dependency/hooks', () => ({
}),
}))

// Mock toast context
const mockNotify = vi.fn()

vi.mock('use-context-selector', async () => {
@@ -45,7 +42,6 @@ vi.mock('use-context-selector', async () => {
}
})

// Test data builders
const createImportDSLResponse = (overrides = {}) => ({
id: 'import-123',
status: 'completed' as const,
@@ -2,11 +2,9 @@ import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import { CreateFromDSLModalTab } from '@/app/components/app/create-from-dsl-modal'
import Tab from './index'
import Tab from '../index'

// ============================================================================
// Tab Component Tests
// ============================================================================

describe('Tab', () => {
const defaultProps = {
@@ -18,9 +16,6 @@ describe('Tab', () => {
vi.clearAllMocks()
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<Tab {...defaultProps} />)
@@ -44,9 +39,7 @@ describe('Tab', () => {
})
})

// --------------------------------------------------------------------------
// Active State Tests
// --------------------------------------------------------------------------
describe('Active State', () => {
it('should mark file tab as active when currentTab is FROM_FILE', () => {
const { container } = render(
@@ -65,9 +58,6 @@ describe('Tab', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should call setCurrentTab with FROM_FILE when file tab is clicked', () => {
render(<Tab {...defaultProps} currentTab={CreateFromDSLModalTab.FROM_URL} />)
@@ -96,9 +86,6 @@ describe('Tab', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container styling', () => {
const { container } = render(<Tab {...defaultProps} />)
@@ -1,11 +1,9 @@
import { fireEvent, render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import Item from './item'
import Item from '../item'

// ============================================================================
// Item Component Tests
// ============================================================================

describe('Item', () => {
const defaultProps = {
@@ -18,9 +16,6 @@ describe('Item', () => {
vi.clearAllMocks()
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<Item {...defaultProps} />)
@@ -45,9 +40,7 @@ describe('Item', () => {
})
})

// --------------------------------------------------------------------------
// Active State Tests
// --------------------------------------------------------------------------
describe('Active State', () => {
it('should have tertiary text color when inactive', () => {
const { container } = render(<Item {...defaultProps} isActive={false} />)
@@ -68,9 +61,6 @@ describe('Item', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should call onClick when clicked', () => {
render(<Item {...defaultProps} />)
@@ -88,9 +78,6 @@ describe('Item', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper container styling', () => {
const { container } = render(<Item {...defaultProps} />)
@@ -99,9 +86,6 @@ describe('Item', () => {
})
})

// --------------------------------------------------------------------------
// Memoization Tests
// --------------------------------------------------------------------------
describe('Memoization', () => {
it('should be memoized with React.memo', () => {
const { rerender } = render(<Item {...defaultProps} />)
@@ -1,14 +1,13 @@
import { render, screen } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import BuiltInPipelineList from './built-in-pipeline-list'
import BuiltInPipelineList from '../built-in-pipeline-list'

// Mock child components
vi.mock('./create-card', () => ({
vi.mock('../create-card', () => ({
default: () => <div data-testid="create-card">CreateCard</div>,
}))

vi.mock('./template-card', () => ({
vi.mock('../template-card', () => ({
default: ({ type, pipeline, showMoreOperations }: { type: string, pipeline: { name: string }, showMoreOperations?: boolean }) => (
<div data-testid="template-card" data-type={type} data-show-more={String(showMoreOperations)}>
{pipeline.name}
@@ -19,7 +18,6 @@ vi.mock('./template-card', () => ({
// Configurable locale mock
let mockLocale = 'en-US'

// Mock hooks
vi.mock('@/context/i18n', () => ({
useLocale: () => mockLocale,
}))
@@ -36,9 +34,7 @@ vi.mock('@/service/use-pipeline', () => ({
usePipelineTemplateList: (...args: unknown[]) => mockUsePipelineTemplateList(...args),
}))

// ============================================================================
// BuiltInPipelineList Component Tests
// ============================================================================

describe('BuiltInPipelineList', () => {
beforeEach(() => {
@@ -46,9 +42,6 @@ describe('BuiltInPipelineList', () => {
mockLocale = 'en-US'
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
mockUsePipelineTemplateList.mockReturnValue({
@@ -71,9 +64,7 @@ describe('BuiltInPipelineList', () => {
})
})

// --------------------------------------------------------------------------
// Loading State Tests
// --------------------------------------------------------------------------
describe('Loading State', () => {
it('should not render TemplateCards when loading', () => {
mockUsePipelineTemplateList.mockReturnValue({
@@ -88,9 +79,7 @@ describe('BuiltInPipelineList', () => {
})
})

// --------------------------------------------------------------------------
// Rendering with Data Tests
// --------------------------------------------------------------------------
describe('Rendering with Data', () => {
it('should render TemplateCard for each pipeline when not loading', () => {
const mockPipelines = [
@@ -136,9 +125,7 @@ describe('BuiltInPipelineList', () => {
})
})

// --------------------------------------------------------------------------
// API Call Tests
// --------------------------------------------------------------------------
describe('API Call', () => {
it('should call usePipelineTemplateList with type built-in', () => {
mockUsePipelineTemplateList.mockReturnValue({
@@ -154,9 +141,6 @@ describe('BuiltInPipelineList', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have grid layout', () => {
mockUsePipelineTemplateList.mockReturnValue({
@@ -181,9 +165,7 @@ describe('BuiltInPipelineList', () => {
})
})

// --------------------------------------------------------------------------
// Locale Handling Tests (Branch Coverage)
// --------------------------------------------------------------------------
describe('Locale Handling', () => {
it('should use zh-Hans locale when set', () => {
mockLocale = 'zh-Hans'
@@ -247,9 +229,7 @@ describe('BuiltInPipelineList', () => {
})
})

// --------------------------------------------------------------------------
// Empty Data Tests
// --------------------------------------------------------------------------
describe('Empty Data', () => {
it('should handle null pipeline_templates', () => {
mockUsePipelineTemplateList.mockReturnValue({
@@ -1,9 +1,8 @@
import { fireEvent, render, screen, waitFor } from '@testing-library/react'
import { beforeEach, describe, expect, it, vi } from 'vitest'

import CreateCard from './create-card'
import CreateCard from '../create-card'

// Mock next/navigation
const mockPush = vi.fn()
vi.mock('next/navigation', () => ({
useRouter: () => ({ push: mockPush }),
@@ -14,14 +13,12 @@ vi.mock('@/app/components/base/amplitude', () => ({
trackEvent: vi.fn(),
}))

// Mock Toast
vi.mock('@/app/components/base/toast', () => ({
default: {
notify: vi.fn(),
},
}))

// Mock service hooks
const mockCreateEmptyDataset = vi.fn()
const mockInvalidDatasetList = vi.fn()

@@ -35,18 +32,13 @@ vi.mock('@/service/knowledge/use-dataset', () => ({
useInvalidDatasetList: () => mockInvalidDatasetList,
}))

// ============================================================================
// CreateCard Component Tests
// ============================================================================

describe('CreateCard', () => {
beforeEach(() => {
vi.clearAllMocks()
})

// --------------------------------------------------------------------------
// Rendering Tests
// --------------------------------------------------------------------------
describe('Rendering', () => {
it('should render without crashing', () => {
render(<CreateCard />)
@@ -66,9 +58,6 @@ describe('CreateCard', () => {
})
})

// --------------------------------------------------------------------------
// User Interactions Tests
// --------------------------------------------------------------------------
describe('User Interactions', () => {
it('should call createEmptyDataset when clicked', async () => {
mockCreateEmptyDataset.mockImplementation((_data, callbacks) => {
@@ -154,9 +143,6 @@ describe('CreateCard', () => {
})
})

// --------------------------------------------------------------------------
// Layout Tests
// --------------------------------------------------------------------------
describe('Layout', () => {
it('should have proper card styling', () => {
const { container } = render(<CreateCard />)
@@ -177,9 +163,6 @@ describe('CreateCard', () => {
})
})

// --------------------------------------------------------------------------
// Memoization Tests
// --------------------------------------------------------------------------
describe('Memoization', () => {
it('should be memoized with React.memo', () => {
const { rerender } = render(<CreateCard />)
Some files were not shown because too many files have changed in this diff.