Mirror of https://github.com/langgenius/dify.git (synced 2026-02-09 15:10:13 -05:00)
feat: fix arch for vibe wf (#32140)
Co-authored-by: Claude Sonnet 4.5 <noreply@anthropic.com>
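The change below moves model-instance creation out of core.workflow.generator and into a new application service, services.workflow_generator_service.WorkflowGeneratorService, which the console controller and LLMGenerator now call instead of WorkflowGenerator. A minimal sketch of the resulting call path; only the names and keys come from the diff, the variable values and surrounding setup are illustrative:

    from services.workflow_generator_service import WorkflowGeneratorService

    # Callers keep passing a plain model_config dict; the service resolves the
    # concrete model instance via ModelManager and injects it into the domain layer.
    result = WorkflowGeneratorService.generate_workflow_flowchart(
        tenant_id="tenant-123",  # illustrative tenant id
        instruction="Summarize a webpage and email the result",
        model_config={
            "provider": "openai",  # example values, as in the new unit tests
            "name": "gpt-4",
            "completion_params": {"temperature": 0.7},
        },
    )
    # result is a dict; per the service docstring it carries keys such as
    # "intent", "flowchart", "nodes", "edges", "message", and "warnings".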
@@ -24,10 +24,10 @@ from core.helper.code_executor.python3.python3_code_provider import Python3CodeProvider
 from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload
 from core.llm_generator.llm_generator import LLMGenerator
 from core.model_runtime.errors.invoke import InvokeError
-from core.workflow.generator import WorkflowGenerator
 from extensions.ext_database import db
 from libs.login import current_account_with_tenant, login_required
 from models import App
+from services.workflow_generator_service import WorkflowGeneratorService
 from services.workflow_service import WorkflowService
 
 DEFAULT_REF_TEMPLATE_SWAGGER_2_0 = "#/definitions/{model}"
@@ -290,7 +290,7 @@ class FlowchartGenerateApi(Resource):
         # Convert PreviousWorkflow to dict if present
         previous_workflow_dict = args.previous_workflow.model_dump() if args.previous_workflow else None
 
-        result = WorkflowGenerator.generate_workflow_flowchart(
+        result = WorkflowGeneratorService.generate_workflow_flowchart(
             tenant_id=current_tenant_id,
             instruction=args.instruction,
             model_config=args.model_config_data,
@@ -32,11 +32,11 @@ from core.ops.ops_trace_manager import TraceQueueManager, TraceTask
 from core.ops.utils import measure_time
 from core.prompt.utils.prompt_template_parser import PromptTemplateParser
 from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionMetadataKey
-from core.workflow.generator import WorkflowGenerator
 from extensions.ext_database import db
 from extensions.ext_storage import storage
 from models import App, Message, WorkflowNodeExecutionModel
 from models.workflow import Workflow
+from services.workflow_generator_service import WorkflowGeneratorService
 
 logger = logging.getLogger(__name__)
 
@@ -301,7 +301,7 @@ class LLMGenerator:
         preferred_language: str | None = None,
         available_models: Sequence[dict[str, object]] | None = None,
     ):
-        return WorkflowGenerator.generate_workflow_flowchart(
+        return WorkflowGeneratorService.generate_workflow_flowchart(
             tenant_id=tenant_id,
             instruction=instruction,
             model_config=model_config,
@@ -5,13 +5,11 @@ from collections.abc import Sequence
 
 import json_repair
 
-from core.model_manager import ModelManager
 from core.model_runtime.entities.message_entities import (
     SystemPromptMessage,
     TextPromptMessageContent,
     UserPromptMessage,
 )
-from core.model_runtime.entities.model_entities import ModelType
 from core.workflow.generator.prompts.builder_prompts import (
     BUILDER_SYSTEM_PROMPT,
     BUILDER_SYSTEM_PROMPT_V2,
@@ -48,9 +46,9 @@ class WorkflowGenerator:
     @classmethod
     def generate_workflow_flowchart(
         cls,
-        tenant_id: str,
+        model_instance,
+        model_parameters: dict,
         instruction: str,
-        model_config: dict,
         available_nodes: Sequence[dict[str, object]] | None = None,
         existing_nodes: Sequence[dict[str, object]] | None = None,
         existing_edges: Sequence[dict[str, object]] | None = None,
@@ -65,6 +63,25 @@ class WorkflowGenerator:
         """
         Generates a Dify Workflow Flowchart from natural language instruction.
 
+        Architecture note: This is pure domain logic that receives model_instance
+        as an injected dependency. Callers should use WorkflowGeneratorService
+        which handles model instance creation.
+
+        Args:
+            model_instance: ModelInstance for LLM invocation (injected)
+            model_parameters: Model completion parameters
+            instruction: Natural language workflow instruction
+            available_nodes: Available workflow node types
+            existing_nodes: Existing nodes (modification mode)
+            existing_edges: Existing edges (modification mode)
+            available_tools: Available tools for workflow
+            selected_node_ids: Selected nodes for refinement
+            previous_workflow: Previous workflow data
+            regenerate_mode: Whether in regeneration mode
+            preferred_language: Preferred output language
+            available_models: Available model configurations
+            use_graph_builder: Use graph builder algorithm
+
         Pipeline:
         1. Planner: Analyze intent & select tools.
         2. Context Filter: Filter relevant tools (reduce tokens).
@@ -72,15 +89,10 @@ class WorkflowGenerator:
         4. Repair: Fix common node/edge issues (NodeRepair, EdgeRepair).
         5. Validator: Check for errors & generate friendly hints.
         6. Renderer: Deterministic Mermaid generation.
 
         Returns:
             dict with generation result
         """
-        model_manager = ModelManager()
-        model_instance = model_manager.get_model_instance(
-            tenant_id=tenant_id,
-            model_type=ModelType.LLM,
-            provider=model_config.get("provider", ""),
-            model=model_config.get("name", ""),
-        )
-        model_parameters = model_config.get("completion_params", {})
+        available_tools_list = list(available_tools) if available_tools else []
+
+        # Check if this is modification mode (user is refining existing workflow)
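After this change, WorkflowGenerator.generate_workflow_flowchart is pure domain logic: it no longer imports ModelManager or ModelType and expects the model instance and completion parameters to be injected by its caller. A hedged sketch of exercising the domain method in isolation with a stand-in model instance (the mock setup is illustrative and not part of this commit; in production the instance comes from WorkflowGeneratorService):

    from unittest.mock import MagicMock

    from core.workflow.generator import WorkflowGenerator

    # A MagicMock stands in for the injected ModelInstance. A real run would need
    # a properly configured instance (or a mock whose LLM-invocation method returns
    # plausible output) for the generation pipeline to produce a meaningful result.
    fake_model_instance = MagicMock()

    result = WorkflowGenerator.generate_workflow_flowchart(
        model_instance=fake_model_instance,
        model_parameters={"temperature": 0.7},
        instruction="Create a simple translation workflow",
    )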
api/services/workflow_generator_service.py (new file, 109 lines)
@@ -0,0 +1,109 @@
+"""
+Workflow Generator Service
+
+Application service that coordinates workflow generation with model management.
+This service bridges the architectural boundary between core.workflow (domain)
+and core.model_manager (infrastructure).
+
+Architecture:
+- Service layer can depend on both core.workflow and core.model_manager
+- Provides a clean facade for controllers
+- Handles model instance creation and injection
+"""
+
+from collections.abc import Sequence
+
+from core.model_manager import ModelManager
+from core.model_runtime.entities.model_entities import ModelType
+from core.workflow.generator import WorkflowGenerator
+
+
+class WorkflowGeneratorService:
+    """
+    Service for generating workflow flowcharts using LLM.
+
+    Responsibilities:
+    1. Obtain model instance from ModelManager
+    2. Delegate workflow generation to WorkflowGenerator
+    3. Handle any service-level error transformation
+    """
+
+    @classmethod
+    def generate_workflow_flowchart(
+        cls,
+        tenant_id: str,
+        instruction: str,
+        model_config: dict,
+        available_nodes: Sequence[dict[str, object]] | None = None,
+        existing_nodes: Sequence[dict[str, object]] | None = None,
+        existing_edges: Sequence[dict[str, object]] | None = None,
+        available_tools: Sequence[dict[str, object]] | None = None,
+        selected_node_ids: Sequence[str] | None = None,
+        previous_workflow: dict[str, object] | None = None,
+        regenerate_mode: bool = False,
+        preferred_language: str | None = None,
+        available_models: Sequence[dict[str, object]] | None = None,
+        use_graph_builder: bool = False,
+    ) -> dict:
+        """
+        Generate workflow flowchart from natural language instruction.
+
+        This service method:
+        1. Creates model instance from model_config (infrastructure concern)
+        2. Invokes WorkflowGenerator with the model instance (domain logic)
+
+        Args:
+            tenant_id: Tenant identifier
+            instruction: Natural language instruction for workflow
+            model_config: Model configuration dict with provider, name, completion_params
+            available_nodes: Available workflow nodes
+            existing_nodes: Existing nodes (for modification mode)
+            existing_edges: Existing edges (for modification mode)
+            available_tools: Available tools for workflow
+            selected_node_ids: Selected node IDs for refinement
+            previous_workflow: Previous workflow data
+            regenerate_mode: Whether in regeneration mode
+            preferred_language: Preferred language for output
+            available_models: Available model configurations
+            use_graph_builder: Whether to use graph builder mode
+
+        Returns:
+            dict with workflow generation result containing:
+            - intent: "generate" | "off_topic" | "error"
+            - flowchart: Mermaid diagram (if successful)
+            - nodes: List of workflow nodes
+            - edges: List of workflow edges
+            - message: Status message
+            - warnings: List of validation warnings
+            - error: Error message (if failed)
+
+        Raises:
+            Exception: If model instance creation fails
+        """
+        # Service layer responsibility: coordinate infrastructure
+        model_manager = ModelManager()
+        model_instance = model_manager.get_model_instance(
+            tenant_id=tenant_id,
+            model_type=ModelType.LLM,
+            provider=model_config.get("provider", ""),
+            model=model_config.get("name", ""),
+        )
+
+        model_parameters = model_config.get("completion_params", {})
+
+        # Delegate to domain layer with injected dependencies
+        return WorkflowGenerator.generate_workflow_flowchart(
+            model_instance=model_instance,
+            model_parameters=model_parameters,
+            instruction=instruction,
+            available_nodes=available_nodes,
+            existing_nodes=existing_nodes,
+            existing_edges=existing_edges,
+            available_tools=available_tools,
+            selected_node_ids=selected_node_ids,
+            previous_workflow=previous_workflow,
+            regenerate_mode=regenerate_mode,
+            preferred_language=preferred_language,
+            available_models=available_models,
+            use_graph_builder=use_graph_builder,
+        )
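The Returns section above spells out the shape of the result dictionary; a short sketch of how a caller might branch on it (only the keys and intent values come from the docstring, the handling logic is illustrative):

    result = WorkflowGeneratorService.generate_workflow_flowchart(
        tenant_id="tenant-123",
        instruction="Build a Q&A workflow over uploaded documents",
        model_config={"provider": "openai", "name": "gpt-4", "completion_params": {}},
    )

    if result.get("intent") == "generate":
        mermaid_source = result.get("flowchart")  # deterministic Mermaid output
        nodes = result.get("nodes", [])
        edges = result.get("edges", [])
        for warning in result.get("warnings", []):
            print(f"validation warning: {warning}")
    elif result.get("intent") == "off_topic":
        print(result.get("message"))
    else:  # "error"
        print(result.get("error"))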
@@ -0,0 +1,83 @@
+"""
+Unit tests for WorkflowGeneratorService
+
+Tests the service layer that bridges workflow generation and model management.
+"""
+
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from services.workflow_generator_service import WorkflowGeneratorService
+
+
+class TestWorkflowGeneratorService:
+    """Test WorkflowGeneratorService"""
+
+    @patch("services.workflow_generator_service.ModelManager")
+    @patch("services.workflow_generator_service.WorkflowGenerator")
+    def test_generate_workflow_flowchart_calls_workflow_generator_with_model_instance(
+        self, mock_workflow_generator, mock_model_manager_class
+    ):
+        """
+        Test that service correctly:
+        1. Creates model instance from ModelManager
+        2. Calls WorkflowGenerator with injected model_instance
+        """
+        # Arrange
+        mock_model_manager = MagicMock()
+        mock_model_manager_class.return_value = mock_model_manager
+
+        mock_model_instance = MagicMock()
+        mock_model_manager.get_model_instance.return_value = mock_model_instance
+
+        mock_workflow_generator.generate_workflow_flowchart.return_value = {
+            "intent": "generate",
+            "flowchart": "graph TD",
+            "nodes": [],
+            "edges": [],
+        }
+
+        model_config = {
+            "provider": "openai",
+            "name": "gpt-4",
+            "completion_params": {"temperature": 0.7},
+        }
+
+        # Act
+        result = WorkflowGeneratorService.generate_workflow_flowchart(
+            tenant_id="test-tenant",
+            instruction="Create a workflow",
+            model_config=model_config,
+        )
+
+        # Assert - ModelManager called correctly
+        mock_model_manager_class.assert_called_once()
+        mock_model_manager.get_model_instance.assert_called_once()
+
+        # Assert - WorkflowGenerator called with model_instance (not config)
+        mock_workflow_generator.generate_workflow_flowchart.assert_called_once()
+        call_kwargs = mock_workflow_generator.generate_workflow_flowchart.call_args.kwargs
+
+        assert call_kwargs["model_instance"] == mock_model_instance
+        assert call_kwargs["model_parameters"] == {"temperature": 0.7}
+        assert call_kwargs["instruction"] == "Create a workflow"
+
+        # Assert - Result returned correctly
+        assert result["intent"] == "generate"
+
+    @patch("services.workflow_generator_service.ModelManager")
+    def test_generate_workflow_flowchart_propagates_model_manager_errors(self, mock_model_manager_class):
+        """Test that ModelManager errors are propagated"""
+        # Arrange
+        mock_model_manager = MagicMock()
+        mock_model_manager_class.return_value = mock_model_manager
+        mock_model_manager.get_model_instance.side_effect = ValueError("Model not found")
+
+        # Act & Assert
+        with pytest.raises(ValueError, match="Model not found"):
+            WorkflowGeneratorService.generate_workflow_flowchart(
+                tenant_id="test-tenant",
+                instruction="Create a workflow",
+                model_config={"provider": "invalid", "name": "invalid"},
+            )