Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions agents/base2/base2-free-deepseek-v4.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
import { createBase2 } from './base2'

// Free-tier orchestrator variant pinned to DeepSeek V4 Pro.
// Starts from the shared base2 free-mode template, then overrides the
// identity fields so this agent is registered under its own id.
const base = createBase2('free', {
  noAskUser: true,
  model: 'deepseek/deepseek-v4-pro',
})

const definition = {
  ...base,
  id: 'base2-free-deepseek-v4',
  displayName: 'Buffy the DeepSeek V4 Free Orchestrator',
}

export default definition
2 changes: 2 additions & 0 deletions agents/types/agent-definition.ts
Original file line number Diff line number Diff line change
Expand Up @@ -415,6 +415,8 @@ export type ModelName =
| 'qwen/qwen3-30b-a3b:nitro'

// DeepSeek
| 'deepseek/deepseek-v4-pro'
| 'deepseek-v4-pro'
| 'deepseek/deepseek-chat-v3-0324'
| 'deepseek/deepseek-chat-v3-0324:nitro'
| 'deepseek/deepseek-r1-0528'
Expand Down
7 changes: 6 additions & 1 deletion common/src/constants/free-agents.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ import { parseAgentId } from '../util/agent-id-parsing'

import { FREEBUFF_GEMINI_THINKER_AGENT_ID } from './freebuff-gemini-thinker'
import {
FREEBUFF_DEEPSEEK_V4_PRO_MODEL_ID,
FREEBUFF_GEMINI_PRO_MODEL_ID,
SUPPORTED_FREEBUFF_MODELS,
} from './freebuff-models'
Expand All @@ -20,7 +21,10 @@ export const FREE_COST_MODE = 'free' as const
* excluded — they're spawned by the root, so counting them would inflate
* every user's apparent activity.
*/
export const FREEBUFF_ROOT_AGENT_IDS = ['base2-free'] as const
export const FREEBUFF_ROOT_AGENT_IDS = [
'base2-free',
'base2-free-deepseek-v4',
] as const
const FREEBUFF_ROOT_AGENT_ID_SET: ReadonlySet<string> = new Set(
FREEBUFF_ROOT_AGENT_IDS,
)
Expand All @@ -39,6 +43,7 @@ const FREEBUFF_ALLOWED_MODEL_IDS = SUPPORTED_FREEBUFF_MODELS.map(
export const FREE_MODE_AGENT_MODELS: Record<string, Set<string>> = {
// Root orchestrator
'base2-free': new Set(FREEBUFF_ALLOWED_MODEL_IDS),
'base2-free-deepseek-v4': new Set([FREEBUFF_DEEPSEEK_V4_PRO_MODEL_ID]),

// File exploration agents
'file-picker': new Set(['google/gemini-2.5-flash-lite']),
Expand Down
1 change: 1 addition & 0 deletions common/src/constants/freebuff-models.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ export interface FreebuffModelOption {
* `getFreebuffDeploymentAvailabilityLabel()` instead. */
export const FREEBUFF_DEPLOYMENT_HOURS_LABEL = '9am ET-5pm PT every day'
export const FREEBUFF_GEMINI_PRO_MODEL_ID = 'google/gemini-3.1-pro-preview'
export const FREEBUFF_DEEPSEEK_V4_PRO_MODEL_ID = 'deepseek/deepseek-v4-pro'
export const FREEBUFF_GLM_MODEL_ID = 'z-ai/glm-5.1'
export const FREEBUFF_KIMI_MODEL_ID = 'moonshotai/kimi-k2.6'
export const FREEBUFF_MINIMAX_MODEL_ID = 'minimax/minimax-m2.7'
Expand Down
3 changes: 3 additions & 0 deletions common/src/constants/model-config.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ export const ALLOWED_MODEL_PREFIXES = [
'openai',
'google',
'x-ai',
'deepseek',
] as const

export const costModes = [
Expand Down Expand Up @@ -55,6 +56,8 @@ export type openrouterModel =
export const deepseekModels = {
deepseekChat: 'deepseek-chat',
deepseekReasoner: 'deepseek-reasoner',
deepseekV4ProDirect: 'deepseek-v4-pro',
deepseekV4Pro: 'deepseek/deepseek-v4-pro',
} as const
export type DeepseekModel = (typeof deepseekModels)[keyof typeof deepseekModels]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -415,6 +415,8 @@ export type ModelName =
| 'qwen/qwen3-30b-a3b:nitro'

// DeepSeek
| 'deepseek/deepseek-v4-pro'
| 'deepseek-v4-pro'
| 'deepseek/deepseek-chat-v3-0324'
| 'deepseek/deepseek-chat-v3-0324:nitro'
| 'deepseek/deepseek-r1-0528'
Expand Down
2 changes: 1 addition & 1 deletion evals/buffbench/main-single-eval.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ async function main() {

await runBuffBench({
evalDataPaths: [path.join(__dirname, 'eval-codebuff.json')],
agents: ['base2-free-evals'],
agents: ['base2-free-deepseek-v4'],
taskIds: ['server-agent-validation'],
saveTraces,
})
Expand Down
4 changes: 4 additions & 0 deletions packages/agent-runtime/src/constants.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,7 @@ export const globalStopSequence = `${JSON.stringify(endsAgentStepParam)}`
* to diff sequential requests and find what's breaking prompt caching.
*/
export const CACHE_DEBUG_FULL_LOGGING = false

// Feature flag: when true, streamed 'reasoning' chunks are also appended to
// the assistant message history (consumed in tools/stream-parser.ts) so they
// can be replayed to the model on subsequent requests.
// Keep disabled by default to preserve mainline behavior until reasoning-token
// replay has been tested more thoroughly.
export const INCLUDE_REASONING_IN_MESSAGE_HISTORY = false
6 changes: 6 additions & 0 deletions packages/agent-runtime/src/tools/stream-parser.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ import {
import { generateCompactId } from '@codebuff/common/util/string'

import { processStreamWithTools } from '../tool-stream-parser'
import { INCLUDE_REASONING_IN_MESSAGE_HISTORY } from '../constants'
import {
executeCustomToolCall,
executeToolCall,
Expand Down Expand Up @@ -276,6 +277,11 @@ export async function processStream(
}

if (chunk.type === 'reasoning') {
if (INCLUDE_REASONING_IN_MESSAGE_HISTORY && chunk.text) {
assistantMessages.push(
assistantMessage({ type: 'reasoning', text: chunk.text }),
)
}
onResponseChunk({
type: 'reasoning_delta',
text: chunk.text,
Expand Down
2 changes: 2 additions & 0 deletions packages/internal/src/env-schema.ts
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ export const serverEnvSchema = clientEnvSchema.extend({
ANTHROPIC_API_KEY: z.string().min(1),
FIREWORKS_API_KEY: z.string().min(1),
CANOPYWAVE_API_KEY: z.string().min(1).optional(),
DEEPSEEK_API_KEY: z.string().min(1).optional(),
SILICONFLOW_API_KEY: z.string().min(1).optional(),
LINKUP_API_KEY: z.string().min(1),
CONTEXT7_API_KEY: z.string().optional(),
Expand Down Expand Up @@ -87,6 +88,7 @@ export const serverProcessEnv: ServerInput = {
ANTHROPIC_API_KEY: process.env.ANTHROPIC_API_KEY,
FIREWORKS_API_KEY: process.env.FIREWORKS_API_KEY,
CANOPYWAVE_API_KEY: process.env.CANOPYWAVE_API_KEY,
DEEPSEEK_API_KEY: process.env.DEEPSEEK_API_KEY,
SILICONFLOW_API_KEY: process.env.SILICONFLOW_API_KEY,
LINKUP_API_KEY: process.env.LINKUP_API_KEY,
CONTEXT7_API_KEY: process.env.CONTEXT7_API_KEY,
Expand Down
1 change: 1 addition & 0 deletions packages/internal/src/env.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ if (isCI) {
ensureEnvDefault('ANTHROPIC_API_KEY', 'test')
ensureEnvDefault('FIREWORKS_API_KEY', 'test')
ensureEnvDefault('CANOPYWAVE_API_KEY', 'test')
ensureEnvDefault('DEEPSEEK_API_KEY', 'test')
ensureEnvDefault('LINKUP_API_KEY', 'test')
ensureEnvDefault('GRAVITY_API_KEY', 'test')
ensureEnvDefault('IPINFO_TOKEN', 'test')
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -509,6 +509,43 @@ describe('provider-specific metadata merging', () => {
])
})

  // Pins the assistant-message conversion contract for reasoning parts:
  // multiple 'reasoning' parts are concatenated (in order, with no added
  // separator) into a single `reasoning_content` field, while 'text' parts
  // become `content` and 'tool-call' parts become `tool_calls`.
  it('should preserve assistant reasoning content with tool calls', () => {
    const result = convertToOpenAICompatibleChatMessages([
      {
        role: 'assistant',
        content: [
          { type: 'reasoning', text: 'Need the date first. ' },
          { type: 'reasoning', text: 'Then call weather.' },
          { type: 'text', text: 'Checking that now...' },
          {
            type: 'tool-call',
            toolCallId: 'call1',
            toolName: 'get_weather',
            input: { location: 'Hangzhou' },
          },
        ],
      },
    ])

    expect(result).toEqual([
      {
        role: 'assistant',
        content: 'Checking that now...',
        // Trailing space in the first part supplies the separator between
        // the two concatenated reasoning strings.
        reasoning_content: 'Need the date first. Then call weather.',
        tool_calls: [
          {
            id: 'call1',
            type: 'function',
            function: {
              name: 'get_weather',
              arguments: JSON.stringify({ location: 'Hangzhou' }),
            },
          },
        ],
      },
    ])
  })

it('should handle a single tool role message with multiple tool-result parts', () => {
const result = convertToOpenAICompatibleChatMessages([
{
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ export function convertToOpenAICompatibleChatMessages(

case 'assistant': {
let text = ''
let reasoningContent = ''
const toolCalls: Array<{
id: string
type: 'function'
Expand All @@ -78,6 +79,10 @@ export function convertToOpenAICompatibleChatMessages(
text += part.text
break
}
case 'reasoning': {
reasoningContent += part.text
break
}
case 'tool-call': {
toolCalls.push({
id: part.toolCallId,
Expand All @@ -96,6 +101,8 @@ export function convertToOpenAICompatibleChatMessages(
messages.push({
role: 'assistant',
content: text,
reasoning_content:
reasoningContent.length > 0 ? reasoningContent : undefined,
tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
...metadata,
})
Expand Down
Loading
Loading