-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path vscode.proposed.lmTools.d.ts
199 lines (163 loc) · 6.37 KB
/
vscode.proposed.lmTools.d.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
// version: 6
// https://github.com/microsoft/vscode/issues/213274
declare module 'vscode' {

	// TODO@API capabilities

	// API -> LM: a tool/function that is available to the language model
	export interface LanguageModelChatTool {
		// TODO@API should use "id" here to match vscode tools, or keep name to match OpenAI?
		/**
		 * The name by which the language model refers to this tool.
		 */
		name: string;

		/**
		 * A description of what this tool does, passed to the language model.
		 */
		description: string;

		/**
		 * A JSON schema describing the parameters this tool accepts, if it takes any.
		 */
		parametersSchema?: JSONSchema;
	}

	// API -> LM: add tools as request option
	export interface LanguageModelChatRequestOptions {
		// TODO@API this will be a heterogeneous array of different types of tools
		/**
		 * The set of tools made available to the language model for this request.
		 */
		tools?: LanguageModelChatTool[];

		/**
		 * Force a specific tool to be used.
		 */
		// NOTE(review): presumably this is matched against LanguageModelChatTool.name — confirm against the provider implementation.
		toolChoice?: string;
	}

	// LM -> USER: function that should be used
	/**
	 * A streamed response part in which the language model requests that a tool be invoked.
	 */
	export class LanguageModelChatResponseToolCallPart {
		/**
		 * The name of the tool the model wants to call.
		 */
		name: string;

		/**
		 * An identifier for this particular tool call, used to correlate the
		 * eventual {@link LanguageModelChatMessageToolResultPart result} with the call.
		 */
		toolCallId: string;

		/**
		 * The arguments for the tool call.
		 */
		// NOTE(review): `any` — shape is presumably dictated by the tool's parametersSchema; not verifiable from this file.
		parameters: any;

		constructor(name: string, toolCallId: string, parameters: any);
	}

	// LM -> USER: text chunk
	/**
	 * A streamed response part carrying plain text produced by the language model.
	 */
	export class LanguageModelChatResponseTextPart {
		/**
		 * The text content of this chunk.
		 */
		value: string;

		constructor(value: string);
	}

	/**
	 * A language model response whose stream interleaves text chunks and tool-call requests.
	 */
	export interface LanguageModelChatResponse {
		/**
		 * An async stream of response parts: text chunks and/or tool calls, in model output order.
		 */
		stream: AsyncIterable<LanguageModelChatResponseTextPart | LanguageModelChatResponseToolCallPart>;
	}

	// USER -> LM: the result of a function call
	/**
	 * A message part carrying the result of a previously requested tool call back to the model.
	 */
	export class LanguageModelChatMessageToolResultPart {
		/**
		 * The id of the tool call this result answers; matches
		 * {@link LanguageModelChatResponseToolCallPart.toolCallId}.
		 */
		toolCallId: string;

		/**
		 * The tool's result, as a string suitable for inclusion in the prompt.
		 */
		content: string;

		/**
		 * Whether the tool invocation failed. Defaults to `false` when omitted in the constructor.
		 */
		isError: boolean;

		constructor(toolCallId: string, content: string, isError?: boolean);
	}

	export interface LanguageModelChatMessage {
		/**
		 * A heterogeneous array of other things that a message can contain as content.
		 * Some parts would be message-type specific for some models and wouldn't go together,
		 * but it's up to the chat provider to decide what to do about that.
		 * Can drop parts that are not valid for the message type.
		 * LanguageModelChatMessageToolResultPart: only on User messages
		 * LanguageModelChatResponseToolCallPart: only on Assistant messages
		 */
		content2: (string | LanguageModelChatMessageToolResultPart | LanguageModelChatResponseToolCallPart)[];
	}

	/**
	 * The result returned from invoking a {@link LanguageModelTool}.
	 */
	export interface LanguageModelToolResult {
		/**
		 * The result can contain arbitrary representations of the content. An example might be a `PromptElementJSON` from `@vscode/prompt-tsx`, using the `contentType` exported by that library.
		 */
		[contentType: string]: any;

		/**
		 * A string representation of the result which can be incorporated back into an LLM prompt without any special handling.
		 */
		toString(): string;
	}

	// Tool registration/invoking between extensions
	export namespace lm {
		/**
		 * Register a LanguageModelTool. The tool must also be registered in the package.json `languageModelTools` contribution point.
		 */
		export function registerTool(id: string, tool: LanguageModelTool): Disposable;

		/**
		 * A list of all available tools.
		 */
		export const tools: ReadonlyArray<LanguageModelToolDescription>;

		/**
		 * Invoke a tool with the given parameters.
		 * TODO@API Could request a set of contentTypes to be returned so they don't all need to be computed?
		 */
		export function invokeTool(id: string, options: LanguageModelToolInvocationOptions, token: CancellationToken): Thenable<LanguageModelToolResult>;
	}

	/**
	 * An opaque token passed through from a chat participant to a tool invocation.
	 */
	// NOTE(review): deliberately `unknown` — the concrete shape is owned by the chat machinery, not by extensions.
	export type ChatParticipantToolToken = unknown;

	/**
	 * Options for invoking a tool via {@link lm.invokeTool}.
	 */
	export interface LanguageModelToolInvocationOptions {
		/**
		 * A token identifying the chat-participant context of this invocation,
		 * or `undefined` when the tool is invoked outside of a chat request.
		 */
		toolInvocationToken: ChatParticipantToolToken | undefined;

		/**
		 * Parameters with which to invoke the tool.
		 */
		// NOTE(review): expected to conform to the tool's parametersSchema — confirm whether validation happens before invoke.
		parameters: Object;

		/**
		 * Options to hint at how many tokens the tool should return in its response.
		 */
		tokenOptions?: {
			/**
			 * If known, the maximum number of tokens the tool should emit in its result.
			 */
			tokenBudget: number;

			/**
			 * Count the number of tokens in a message using the model specific tokenizer-logic.
			 * @param text A string.
			 * @param token Optional cancellation token. See {@link CancellationTokenSource} for how to create one.
			 * @returns A thenable that resolves to the number of tokens.
			 */
			countTokens(text: string, token?: CancellationToken): Thenable<number>;
		};
	}

	/**
	 * A JSON schema value. Kept deliberately loose as a plain `object`.
	 */
	export type JSONSchema = object;

	/**
	 * Static metadata describing a registered tool, as exposed via {@link lm.tools}.
	 */
	export interface LanguageModelToolDescription {
		/**
		 * A unique identifier for the tool.
		 */
		id: string;

		/**
		 * A human-readable name for this tool that may be used to describe it in the UI.
		 */
		displayName: string | undefined;

		/**
		 * A description of this tool that may be passed to a language model.
		 */
		modelDescription: string;

		/**
		 * A JSON schema for the parameters this tool accepts.
		 */
		parametersSchema?: JSONSchema;
	}

	/**
	 * A tool that can be registered with {@link lm.registerTool} and invoked via {@link lm.invokeTool}.
	 */
	export interface LanguageModelTool {
		// TODO@API should it be LanguageModelToolResult | string?
		invoke(options: LanguageModelToolInvocationOptions, token: CancellationToken): ProviderResult<LanguageModelToolResult>;
	}

	/**
	 * A reference to a tool that the user attached to a chat request, e.g. via `#`-mentions.
	 */
	export interface ChatLanguageModelToolReference {
		/**
		 * The tool's ID. Refers to a tool listed in {@link lm.tools}.
		 */
		readonly id: string;

		/**
		 * The start and end index of the reference in the {@link ChatRequest.prompt prompt}. When undefined, the reference was not part of the prompt text.
		 *
		 * *Note* that the indices take the leading `#`-character into account which means they can
		 * used to modify the prompt as-is.
		 */
		readonly range?: [start: number, end: number];
	}

	export interface ChatRequest {
		/**
		 * The list of tools that the user attached to their request.
		 *
		 * *Note* that if tools are referenced in the text of the prompt, using `#`, the prompt contains
		 * references as authored and that it is up to the participant
		 * to further modify the prompt, for instance by inlining reference values or creating links to
		 * headings which contain the resolved values. References are sorted in reverse by their range
		 * in the prompt. That means the last reference in the prompt is the first in this list. This simplifies
		 * string-manipulation of the prompt.
		 */
		readonly toolReferences: readonly ChatLanguageModelToolReference[];
	}

	export interface ChatRequestTurn {
		/**
		 * The list of tools were attached to this request.
		 */
		readonly toolReferences?: readonly ChatLanguageModelToolReference[];
	}
}