Mirror of https://github.com/langgenius/dify.git
fix: enhanced structured output
@@ -654,7 +654,6 @@ class LLMGenerator:
         return f"""You are a code generator for Dify workflow automation.

 Generate {language} code to extract/transform available variables for the target parameter.
-If user is not talking about the code node, provide the existing data or blank data for user, following the schema.

 ## Target Parameter
 {parameter_block}
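For orientation, the template above is an f-string, so `{language}` and `{parameter_block}` are filled in by the caller before the prompt reaches the model. A toy rendering of that interpolation is sketched below; the parameter-block layout and field names are invented for illustration, not Dify's actual formatting.

```python
def render_parameter_block(name: str, param_type: str, description: str) -> str:
    # Invented layout: Dify builds parameter_block elsewhere and may format it differently.
    return f"- name: {name}\n  type: {param_type}\n  description: {description}"


language = "python3"
parameter_block = render_parameter_block(
    name="top_keywords",
    param_type="array[string]",
    description="Top keywords extracted from the input text",
)
prompt = (
    f"Generate {language} code to extract/transform available variables "
    f"for the target parameter.\n\n## Target Parameter\n{parameter_block}"
)
```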
@@ -668,6 +667,7 @@ If user is not talking about the code node, provide the existing data or blank d
 - Respect target constraints (options/min/max/default/multiple) if provided.
 - If existing code is provided, adapt it instead of rewriting from scratch.
 - Return only JSON that matches the provided schema.
+- If user is not talking about the code node, provide blank code/outputs/variables for user, say to user in `message`.
 """

     @staticmethod
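The rules above pin the model to a single schema-conformant JSON reply ("Return only JSON that matches the provided schema", with blank code/outputs/variables and an explanation in `message` when the request is off-topic). As a rough illustration of that contract from the caller's side, here is a minimal, hypothetical parser; the required key names echo the prompt's wording and are not taken from Dify's actual schema.

```python
import json
from typing import Any

# Illustrative field names only, echoing the prompt's wording; Dify's real schema may differ.
REQUIRED_KEYS = ("code", "outputs", "variables", "message")


def parse_structured_reply(raw_text: str) -> dict[str, Any]:
    """Parse the model's reply under the 'only JSON' rule and check the expected fields."""
    reply = json.loads(raw_text.strip())
    if not isinstance(reply, dict):
        raise ValueError("expected a single JSON object at the top level")
    missing = [key for key in REQUIRED_KEYS if key not in reply]
    if missing:
        raise ValueError(f"reply is missing required fields: {missing}")
    return reply
```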
@@ -156,10 +156,10 @@ def invoke_llm_with_structured_output(
    else:
        # Priority 3: Prompt-based fallback
        _set_response_format(model_parameters_with_json_schema, model_schema.parameter_rules)
        prompt_messages = _handle_prompt_based_schema(
            prompt_messages=prompt_messages,
            structured_output_schema=json_schema,
        )
        prompt_messages = _handle_prompt_based_schema(
            prompt_messages=prompt_messages,
            structured_output_schema=json_schema,
        )

    llm_result = model_instance.invoke_llm(
        prompt_messages=list(prompt_messages),
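The "Priority 3" branch above covers models with no native structured-output mode by steering them through the prompt itself. The sketch below shows that general pattern under the assumption that `_handle_prompt_based_schema` roughly injects a schema-bearing instruction into the message list; the real helper in Dify's codebase may work differently, and `PromptMessage` here is a simplified stand-in.

```python
from dataclasses import dataclass


@dataclass
class PromptMessage:
    # Stand-in for Dify's prompt message entity; only the fields the sketch needs.
    role: str
    content: str


def prompt_based_schema_fallback(
    prompt_messages: list[PromptMessage],
    structured_output_schema: str,
) -> list[PromptMessage]:
    """Illustrative fallback: when the model has no native JSON-schema mode,
    steer it by putting the schema and an output rule into a system message."""
    instruction = (
        "Answer with a single JSON object that conforms to this JSON Schema:\n"
        f"{structured_output_schema}\n"
        "Do not output anything outside the JSON object."
    )
    return [PromptMessage(role="system", content=instruction), *prompt_messages]


# Example: the wrapped messages would then be handed to the model invocation,
# mirroring the prompt_messages=list(prompt_messages) call in the hunk above.
messages = prompt_based_schema_fallback(
    [PromptMessage(role="user", content="Summarize the input variables.")],
    structured_output_schema='{"type": "object", "properties": {"summary": {"type": "string"}}}',
)
```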