
Commit 912bd29

SDK regeneration

Committed May 15, 2024 · 1 parent b72be12

281 files changed, 1848 insertions(+), 1666 deletions(-)


package.json

Lines changed: 11 additions & 6 deletions
@@ -1,30 +1,35 @@
 {
   "name": "cohere-ai",
-  "version": "7.9.5",
+  "version": "7.10.0",
   "private": false,
   "repository": "https://github.com/cohere-ai/cohere-typescript",
   "main": "./index.js",
   "types": "./index.d.ts",
   "scripts": {
     "format": "prettier . --write --ignore-unknown",
     "build": "tsc",
-    "prepack": "cp -rv dist/. ."
+    "prepack": "cp -rv dist/. .",
+    "test": "jest"
   },
   "dependencies": {
     "url-join": "4.0.1",
     "form-data": "4.0.0",
+    "formdata-node": "^6.0.3",
     "node-fetch": "2.7.0",
     "qs": "6.11.2",
-    "js-base64": "3.7.2"
+    "js-base64": "3.7.2",
+    "form-data-encoder": "^4.0.2"
   },
   "devDependencies": {
     "@types/url-join": "4.0.1",
     "@types/qs": "6.9.8",
     "@types/node-fetch": "2.6.9",
+    "jest": "^29.7.0",
+    "@types/jest": "29.5.5",
+    "ts-jest": "^29.1.2",
+    "jest-environment-jsdom": "29.7.0",
     "@types/node": "17.0.33",
     "prettier": "2.7.1",
-    "typescript": "4.6.4",
-    "jest": "^29.7.0",
-    "ts-jest": "^29.1.2"
+    "typescript": "4.6.4"
   }
 }
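
The new `test` script and the Jest-related devDependencies (`jest`, `ts-jest`, `@types/jest`, `jest-environment-jsdom`) point to a ts-jest driven TypeScript test setup. The commit excerpt does not show a Jest configuration file, so the sketch below is hypothetical: the file name, `testMatch` pattern, and environment choice are assumptions illustrating how these packages typically fit together, not part of this diff.

// jest.config.ts (hypothetical sketch, not part of this commit):
// a minimal ts-jest setup consistent with the devDependencies added above.
import type { Config } from "jest";

const config: Config = {
    preset: "ts-jest",               // compile TypeScript test files via ts-jest
    testEnvironment: "node",         // "jsdom" is also available via jest-environment-jsdom
    testMatch: ["**/tests/**/*.test.ts"],
};

export default config;

With a config like this in place, `npm test` runs the new `test` script, which simply invokes `jest`.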

src/api/client/requests/ChatRequest.ts

Lines changed: 47 additions & 43 deletions
@@ -2,71 +2,37 @@
  * This file was auto-generated by Fern from our API Definition.
  */
 
-import * as Cohere from "../..";
+import * as Cohere from "../../index";
 
 /**
  * @example
  *     {
  *         message: "Can you give me a global market overview of solar panels?",
  *         stream: false,
- *         chatHistory: [{
- *                 role: Cohere.ChatMessageRole.Chatbot,
- *                 message: "Hi!"
- *             }, {
- *                 role: Cohere.ChatMessageRole.Chatbot,
- *                 message: "How can I help you today?"
- *             }],
- *         promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
- *         temperature: 0.3
- *     }
- *
- * @example
- *     {
- *         message: "Can you give me a global market overview of solar panels?",
- *         stream: false,
- *         chatHistory: [{
- *                 role: Cohere.ChatMessageRole.Chatbot,
- *                 message: "Hi!"
- *             }, {
- *                 role: Cohere.ChatMessageRole.Chatbot,
- *                 message: "How can I help you today?"
- *             }],
- *         promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
- *         temperature: 0.3
- *     }
- *
- * @example
- *     {
- *         message: "Can you give me a global market overview of solar panels?",
- *         stream: false,
- *         chatHistory: [{
- *                 role: Cohere.ChatMessageRole.Chatbot,
- *                 message: "Hi!"
- *             }, {
- *                 role: Cohere.ChatMessageRole.Chatbot,
- *                 message: "How can I help you today?"
- *             }],
  *         promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
  *         temperature: 0.3
  *     }
  */
 export interface ChatRequest {
     /**
      * Text input for the model to respond to.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     message: string;
     /**
-     * Defaults to `command-r`.
+     * Defaults to `command-r-plus`.
      *
      * The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
+     * Compatible Deployments: Cohere Platform, Private Deployments
      *
      */
     model?: string;
     /**
      * When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.
      *
      * The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     preamble?: string;
@@ -76,13 +42,15 @@ export interface ChatRequest {
      * Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
      *
      * The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
-    chatHistory?: Cohere.ChatMessage[];
+    chatHistory?: Cohere.Message[];
     /**
      * An alternative to `chat_history`.
      *
      * Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non empty string.
+     * Compatible Deployments: Cohere Platform
      *
      */
     conversationId?: string;
@@ -96,20 +64,23 @@ export interface ChatRequest {
      * With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
      *
      * With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
+     * Compatible Deployments: Cohere Platform Only AUTO_PRESERVE_ORDER: Azure, AWS Sagemaker, Private Deployments
      *
      */
     promptTruncation?: Cohere.ChatRequestPromptTruncation;
     /**
      * Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one.
      *
      * When specified, the model's reply will be enriched with information found by quering each of the connectors (RAG).
+     * Compatible Deployments: Cohere Platform
      *
      */
     connectors?: Cohere.ChatConnector[];
     /**
      * Defaults to `false`.
      *
      * When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     searchQueriesOnly?: boolean;
@@ -131,69 +102,101 @@ export interface ChatRequest {
      * An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
      *
      * See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     documents?: Cohere.ChatDocument[];
+    /**
+     * Defaults to `"accurate"`.
+     *
+     * Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
+     *
+     */
+    citationQuality?: Cohere.ChatRequestCitationQuality;
     /**
      * Defaults to `0.3`.
      *
      * A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
      *
      * Randomness can be further maximized by increasing the value of the `p` parameter.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     temperature?: number;
     /**
      * The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     maxTokens?: number;
     /**
      * The maximum number of input tokens to send to the model. If not specified, `max_input_tokens` is the model's context length limit minus a small buffer.
      *
      * Input will be truncated according to the `prompt_truncation` parameter.
+     * Compatible Deployments: Cohere Platform
      *
      */
     maxInputTokens?: number;
     /**
      * Ensures only the top `k` most likely tokens are considered for generation at each step.
      * Defaults to `0`, min value of `0`, max value of `500`.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     k?: number;
     /**
      * Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
      * Defaults to `0.75`. min value of `0.01`, max value of `0.99`.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     p?: number;
-    /** If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed. */
+    /**
+     * If specified, the backend will make a best effort to sample tokens
+     * deterministically, such that repeated requests with the same
+     * seed and parameters should return the same result. However,
+     * determinism cannot be totally guaranteed.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
+     *
+     */
     seed?: number;
     /**
      * A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     stopSequences?: string[];
     /**
      * Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
      *
      * Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     frequencyPenalty?: number;
     /**
      * Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
      *
      * Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     presencePenalty?: number;
-    /** When enabled, the user's prompt will be sent to the model without any pre-processing. */
+    /**
+     * When enabled, the user's prompt will be sent to the model without
+     * any pre-processing.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
+     *
+     */
     rawPrompting?: boolean;
+    /** The prompt is returned in the `prompt` response field when this is enabled. */
+    returnPrompt?: boolean;
     /**
      * A list of available tools (functions) that the model may suggest invoking before producing a text response.
      *
      * When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
     tools?: Cohere.Tool[];
@@ -219,7 +222,8 @@ export interface ChatRequest {
      * ]
      * ```
      * **Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
+     * Compatible Deployments: Cohere Platform, Azure, AWS Sagemaker, Private Deployments
      *
      */
-    toolResults?: Cohere.ChatRequestToolResultsItem[];
+    toolResults?: Cohere.ToolResult[];
 }
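
The regenerated interface changes `chatHistory` from `Cohere.ChatMessage[]` to `Cohere.Message[]`, changes `toolResults` from `Cohere.ChatRequestToolResultsItem[]` to `Cohere.ToolResult[]`, and adds the `citationQuality` and `returnPrompt` fields. The sketch below shows how a request using the new fields might look; the `CohereClient` constructor, the `chat()` call, and the literal `role` strings in `chatHistory` are assumptions about the SDK's public surface and are not part of this diff.

// Hypothetical usage sketch: only the ChatRequest fields below appear in this
// diff; CohereClient and chat() are assumed from the SDK's public surface.
import { CohereClient, Cohere } from "cohere-ai";

const cohere = new CohereClient({ token: process.env.CO_API_KEY ?? "" });

async function main(): Promise<void> {
    const request: Cohere.ChatRequest = {
        message: "Can you give me a global market overview of solar panels?",
        // chatHistory is now typed as Cohere.Message[] (previously Cohere.ChatMessage[]);
        // the discriminated role strings are an assumption about that union.
        chatHistory: [
            { role: "CHATBOT", message: "Hi!" },
            { role: "CHATBOT", message: "How can I help you today?" },
        ],
        promptTruncation: Cohere.ChatRequestPromptTruncation.Off,
        temperature: 0.3,
        citationQuality: "accurate", // new field in this commit
        returnPrompt: true,          // new field in this commit
    };

    const response = await cohere.chat(request);
    console.log(response.text);
}

main().catch(console.error);

When `returnPrompt` is enabled, the rendered prompt comes back in the response's `prompt` field, as described in the new doc comment above.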

0 commit comments
