feat: handle llm memory

Joel 2024-03-01 15:07:29 +08:00
parent 6f6f032244
commit 0518da1e49
3 changed files with 725 additions and 524 deletions

View File

@@ -1,11 +1,11 @@
-import { MemoryRole } from '../../types'
+import { BlockEnum } from '../../types'
 import type { LLMNodeType } from './types'
 import { Resolution } from '@/types/app'
 export const mockData: LLMNodeType = {
   title: 'Test',
   desc: 'Test',
-  type: 'Test',
+  type: BlockEnum.LLM,
   model: {
     provider: 'openai',
     name: 'gpt-4',
@@ -26,7 +26,10 @@ export const mockData: LLMNodeType = {
   ],
   prompt: [],
   memory: {
-    role_prefix: MemoryRole.assistant,
+    role_prefix: {
+      user: 'user: ',
+      assistant: 'assistant: ',
+    },
     window: {
       enabled: false,
       size: 0,

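For readers following along: the change above replaces the single MemoryRole enum value with a per-role prefix map, so user and assistant turns can each carry their own textual prefix. Below is a minimal sketch of the shape the mock data now satisfies; the type names RolePrefix and MemoryConfigType are assumptions for illustration, and only the role_prefix and window fields are taken from the diff.

// Sketch of the memory shape implied by the mock data above.
// Type names are hypothetical; the fields mirror the diff.
type RolePrefix = {
  user: string
  assistant: string
}

type MemoryConfigType = {
  role_prefix: RolePrefix
  window: {
    enabled: boolean
    size: number
  }
}

const memory: MemoryConfigType = {
  role_prefix: {
    user: 'user: ',
    assistant: 'assistant: ',
  },
  window: {
    enabled: false,
    size: 0,
  },
}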
View File

@@ -26,9 +26,11 @@ const Panel: FC = () => {
     handleContextVarChange,
     handleMemoryChange,
   } = useConfig(mockData)
+  const isChatApp = true // TODO: get from app context
   const model = inputs.model
   const modelMode = inputs.model?.mode
-  const isChatMode = modelMode === 'chat'
+  const isChatModel = modelMode === 'chat'
+  const isCompletionModel = !isChatModel
 
   return (
     <div className='mt-2'>
@@ -84,14 +86,18 @@ const Panel: FC = () => {
         Prompt
       </Field>
       {/* */}
+      {isChatApp && (
+        <div className='text-xs text-gray-300'>Memory examples (Designing)</div>
+      )}
       {/* Memory */}
-      {isChatMode && (
+      {isChatApp && (
         <>
           <MemoryConfig
             readonly={readOnly}
             payload={inputs.memory}
             onChange={handleMemoryChange}
-            canSetRoleName
+            canSetRoleName={isCompletionModel}
           />
           <Split />
         </>

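The canSetRoleName={isCompletionModel} gate makes sense once you consider how memory is serialized: chat models receive structured role messages, while completion models receive one flat prompt string, so remembered turns must be inlined with textual role prefixes. A hedged sketch of that serialization follows; serializeHistory and Turn are illustrative names, not the repository's actual helpers.

// Illustrative only: shows why role prefixes matter for completion
// models. Names below are assumptions, not the repository's API.
type Turn = { role: 'user' | 'assistant'; text: string }
type RolePrefix = { user: string; assistant: string }

function serializeHistory(turns: Turn[], prefix: RolePrefix): string {
  // Completion models take one flat string, so each remembered turn
  // is prefixed with its role before being joined into the prompt.
  return turns.map(t => `${prefix[t.role]}${t.text}`).join('\n')
}

// Example: two remembered turns rendered for a completion model.
const history: Turn[] = [
  { role: 'user', text: 'Hi' },
  { role: 'assistant', text: 'Hello! How can I help?' },
]
console.log(serializeHistory(history, { user: 'user: ', assistant: 'assistant: ' }))
// user: Hi
// assistant: Hello! How can I help?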
File diff suppressed because it is too large