Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(smart-apply): Adds custom model prompt for smart apply. #7328

Merged
merged 5 commits into from
Mar 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions lib/shared/src/sourcegraph-api/completions/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,10 @@ export interface CompletionParameters {
type: 'content'
content: string
}
// Rewrite and adaptive speculation are used by Fireworks, which improves performance for sparse rewrite tasks.
// https://docs.fireworks.ai/guides/predicted-outputs#using-predicted-outputs
rewriteSpeculation?: boolean
adaptiveSpeculation?: boolean
}

export interface SerializedCompletionParameters extends Omit<CompletionParameters, 'messages'> {
Expand Down
57 changes: 56 additions & 1 deletion vscode/src/completions/get-current-doc-context.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,12 @@ import { type WrappedParser, resetParsersCache } from '../tree-sitter/parser'

import type { DocumentContext } from '@sourcegraph/cody-shared'
import { getContextRange } from './doc-context-getters'
import { getCurrentDocContext, insertIntoDocContext } from './get-current-doc-context'
import {
getCurrentDocContext,
getPrefixWithCharLimit,
getSuffixWithCharLimit,
insertIntoDocContext,
} from './get-current-doc-context'
import { documentAndPosition, initTreeSitterParser } from './test-helpers'

function testGetCurrentDocContext(
Expand Down Expand Up @@ -742,3 +747,53 @@ describe('insertCompletionIntoDocContext', () => {
})
})
})

// Unit tests for the prefix trimming helper: lines are kept from the END of
// the array until the character budget is exhausted.
describe('getPrefixWithCharLimit', () => {
    it('returns all lines when total length is within limit', () => {
        expect(getPrefixWithCharLimit(['line1', 'line2', 'line3'], 100)).toBe('line1\nline2\nline3')
    })

    it('returns subset of lines from the end when total length exceeds limit', () => {
        expect(getPrefixWithCharLimit(['line1', 'line2', 'very_long_line3'], 20)).toBe(
            'line2\nvery_long_line3'
        )
    })

    it('returns only last line when limit is small', () => {
        expect(getPrefixWithCharLimit(['line1', 'line2', 'line3'], 5)).toBe('line3')
    })

    it('handles empty array', () => {
        expect(getPrefixWithCharLimit([], 100)).toBe('')
    })
})

// Unit tests for the suffix trimming helper: lines are kept from the START of
// the array until the character budget is exhausted.
describe('getSuffixWithCharLimit', () => {
    it('returns all lines when total length is within limit', () => {
        expect(getSuffixWithCharLimit(['line1', 'line2', 'line3'], 100)).toBe('line1\nline2\nline3')
    })

    it('returns subset of lines from the start when total length exceeds limit', () => {
        expect(getSuffixWithCharLimit(['very_long_line1', 'line2', 'line3'], 20)).toBe(
            'very_long_line1\nline2'
        )
    })

    it('returns only first line when limit is small', () => {
        expect(getSuffixWithCharLimit(['line1', 'line2', 'line3'], 5)).toBe('line1')
    })

    it('handles empty array', () => {
        expect(getSuffixWithCharLimit([], 100)).toBe('')
    })
})
52 changes: 27 additions & 25 deletions vscode/src/completions/get-current-doc-context.ts
Original file line number Diff line number Diff line change
Expand Up @@ -84,17 +84,7 @@ export function getCurrentDocContext(params: GetCurrentDocContextParams): Docume
const prefixLines = lines(completePrefixWithContextCompletion)
const suffixLines = lines(completeSuffix)
const prefix = getPrefix({ offset, maxPrefixLength, prefixLines })

let totalSuffix = 0
let endLine = 0
for (let i = 0; i < suffixLines.length; i++) {
if (totalSuffix + suffixLines[i].length > maxSuffixLength) {
break
}
endLine = i + 1
totalSuffix += suffixLines[i].length
}
const suffix = suffixLines.slice(0, endLine).join('\n')
const suffix = getSuffixWithCharLimit(suffixLines, maxSuffixLength)

return getDerivedDocContext({
maxPrefixLength,
Expand All @@ -119,24 +109,36 @@ interface GetPrefixParams {

function getPrefix(params: GetPrefixParams): string {
const { offset, maxPrefixLength, prefixLines } = params

let prefix: string
if (offset > maxPrefixLength) {
let total = 0
let startLine = prefixLines.length
for (let i = prefixLines.length - 1; i >= 0; i--) {
if (total + prefixLines[i].length > maxPrefixLength) {
break
}
startLine = i
total += prefixLines[i].length
return getPrefixWithCharLimit(prefixLines, maxPrefixLength)
}
return prefixLines.join('\n')
}

export function getPrefixWithCharLimit(prefixLines: string[], maxPrefixLength: number): string {
let total = 0
let startLine = prefixLines.length
for (let i = prefixLines.length - 1; i >= 0; i--) {
if (total + prefixLines[i].length > maxPrefixLength) {
break
}
prefix = prefixLines.slice(startLine).join('\n')
} else {
prefix = prefixLines.join('\n')
startLine = i
total += prefixLines[i].length
}
return prefixLines.slice(startLine).join('\n')
}

return prefix
/**
 * Joins as many LEADING lines of `suffixLines` as fit within
 * `maxSuffixLength` characters, preserving their original order.
 *
 * Lines are accumulated from the front of the array; a line is included only
 * if its length still fits the remaining budget. The '\n' separators added by
 * the final join are not counted toward the limit.
 *
 * @param suffixLines lines of the document suffix, earliest first.
 * @param maxSuffixLength character budget for the returned suffix.
 * @returns the leading lines joined with '\n' (empty string if nothing fits).
 */
export function getSuffixWithCharLimit(suffixLines: string[], maxSuffixLength: number): string {
    const kept: string[] = []
    let used = 0
    for (const line of suffixLines) {
        if (used + line.length > maxSuffixLength) {
            break
        }
        kept.push(line)
        used += line.length
    }
    return kept.join('\n')
}

interface GetDerivedDocContextParams {
Expand Down
30 changes: 1 addition & 29 deletions vscode/src/edit/adapters/base.ts
Original file line number Diff line number Diff line change
@@ -1,8 +1,4 @@
import {
type CompletionParameters,
type ModelContextWindow,
modelsService,
} from '@sourcegraph/cody-shared'
import type { CompletionParameters, ModelContextWindow } from '@sourcegraph/cody-shared'
import type { FixupTask } from '../../non-stop/FixupTask'

export interface ModelParametersInput {
Expand All @@ -15,27 +11,3 @@ export interface ModelParametersInput {
export interface ModelParameterProvider {
getModelParameters(args: ModelParametersInput): CompletionParameters
}

export class DefaultModelParameterProvider implements ModelParameterProvider {
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Moved this to vscode/src/edit/adapters/default.ts

getModelParameters(args: ModelParametersInput): CompletionParameters {
const params = {
model: args.model,
stopSequences: args.stopSequences,
maxTokensToSample: args.contextWindow.output,
} as CompletionParameters

if (args.model.includes('gpt-4o')) {
// Use Predicted Output for gpt-4o models.
// https://platform.openai.com/docs/guides/predicted-outputs
params.prediction = {
type: 'content',
content: args.task.original,
}
}

if (modelsService.isStreamDisabled(args.model)) {
params.stream = false
}
return params
}
}
109 changes: 109 additions & 0 deletions vscode/src/edit/adapters/default.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
import { modelsService } from '@sourcegraph/cody-shared'
import { describe, expect, it, vi } from 'vitest'
import type { ModelParametersInput } from './base'
import { DefaultModelParameterProvider } from './default'

describe('DefaultModelParameterProvider', () => {
    const provider = new DefaultModelParameterProvider()

    // Shared fixture: minimal edit-task input with a 1000/500 context window.
    // `stopSequences` stays undefined when not supplied, matching the provider's
    // pass-through behavior.
    const makeInput = (model: string, stopSequences?: string[]): ModelParametersInput => ({
        model,
        stopSequences,
        contextWindow: { input: 1000, output: 500 },
        task: { original: 'test task' } as any,
    })

    it('should return basic model parameters', () => {
        expect(provider.getModelParameters(makeInput('gpt-4'))).toEqual({
            model: 'gpt-4',
            maxTokensToSample: 500,
        })
    })

    it('should include stop sequences when provided', () => {
        expect(provider.getModelParameters(makeInput('gpt-4', ['\n', 'END']))).toEqual({
            model: 'gpt-4',
            stopSequences: ['\n', 'END'],
            maxTokensToSample: 500,
        })
    })

    it('should include prediction for gpt-4o models', () => {
        expect(provider.getModelParameters(makeInput('gpt-4o'))).toEqual({
            model: 'gpt-4o',
            maxTokensToSample: 500,
            prediction: {
                type: 'content',
                content: 'test task',
            },
        })
    })

    it('should disable streaming for specific models', () => {
        // Force the non-streaming path regardless of the real model list.
        vi.spyOn(modelsService, 'isStreamDisabled').mockReturnValue(true)

        expect(provider.getModelParameters(makeInput('gpt-4'))).toEqual({
            model: 'gpt-4',
            maxTokensToSample: 500,
            stream: false,
        })

        vi.restoreAllMocks()
    })

    it('should handle combined cases', () => {
        // Stop sequences + prediction + disabled streaming all at once.
        vi.spyOn(modelsService, 'isStreamDisabled').mockReturnValue(true)

        expect(provider.getModelParameters(makeInput('gpt-4o', ['\n', 'END']))).toEqual({
            model: 'gpt-4o',
            stopSequences: ['\n', 'END'],
            maxTokensToSample: 500,
            prediction: {
                type: 'content',
                content: 'test task',
            },
            stream: false,
        })

        vi.restoreAllMocks()
    })
})
26 changes: 26 additions & 0 deletions vscode/src/edit/adapters/default.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
import { type CompletionParameters, modelsService } from '@sourcegraph/cody-shared'
import type { ModelParameterProvider, ModelParametersInput } from './base'

/**
 * Default strategy for turning an edit task into completion request
 * parameters: forwards the model, stop sequences, and output token budget,
 * and layers on model-specific tweaks (Predicted Outputs, stream disabling).
 */
export class DefaultModelParameterProvider implements ModelParameterProvider {
    getModelParameters(args: ModelParametersInput): CompletionParameters {
        const { model, stopSequences, contextWindow, task } = args

        const params = {
            model,
            stopSequences,
            maxTokensToSample: contextWindow.output,
        } as CompletionParameters

        // gpt-4o supports OpenAI Predicted Outputs: seeding the prediction with
        // the pre-edit text speeds up sparse rewrites.
        // https://platform.openai.com/docs/guides/predicted-outputs
        if (model.includes('gpt-4o')) {
            params.prediction = {
                type: 'content',
                content: task.original,
            }
        }

        // Some models only support non-streaming responses.
        if (modelsService.isStreamDisabled(model)) {
            params.stream = false
        }

        return params
    }
}
Loading
Loading