// This file was auto-generated by Fern from our API Definition.
package api
import (
json "encoding/json"
fmt "fmt"
core "github.com/cohere-ai/cohere-go/core"
time "time"
)
type ChatRequest struct {
// Accepts a string.
// The chat message from the user to the model.
Message string `json:"message"`
// Defaults to `command`.
// The identifier of the model, which can be one of the existing Cohere models or the full ID for a [finetuned custom model](/docs/training-custom-models).
// Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models).
Model *string `json:"model,omitempty"`
// When specified, the default Cohere preamble will be replaced with the provided one.
PreambleOverride *string `json:"preamble_override,omitempty"`
// A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`.
ChatHistory []*ChatMessage `json:"chat_history,omitempty"`
// An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation.
// If a conversation with this id does not already exist, a new conversation will be created.
ConversationId *string `json:"conversation_id,omitempty"`
// Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
// Dictates how the prompt will be constructed.
// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit.
// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
PromptTruncation *ChatRequestPromptTruncation `json:"prompt_truncation,omitempty"`
// Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom connector, if you've made one.
// When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
Connectors []*ChatConnector `json:"connectors,omitempty"`
// Defaults to `false`.
// When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
SearchQueriesOnly *bool `json:"search_queries_only,omitempty"`
// A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
Documents []ChatDocument `json:"documents,omitempty"`
// Defaults to `"accurate"`.
// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
CitationQuality *ChatRequestCitationQuality `json:"citation_quality,omitempty"`
// Defaults to `0.3`
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
Temperature *float64 `json:"temperature,omitempty"`
stream bool
}
func (c *ChatRequest) Stream() bool {
return c.stream
}
func (c *ChatRequest) UnmarshalJSON(data []byte) error {
type unmarshaler ChatRequest
var body unmarshaler
if err := json.Unmarshal(data, &body); err != nil {
return err
}
*c = ChatRequest(body)
c.stream = false
return nil
}
func (c *ChatRequest) MarshalJSON() ([]byte, error) {
type embed ChatRequest
var marshaler = struct {
embed
Stream bool `json:"stream"`
}{
embed: embed(*c),
Stream: false,
}
return json.Marshal(marshaler)
}
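// Example (illustrative sketch, not generated by Fern): building a ChatRequest with a
// web-search connector restricted to a single site and marshaling it. The MarshalJSON
// above always injects `"stream": false` for this type, so the resulting body targets
// the non-streaming chat endpoint; ChatStreamRequest below is the streaming counterpart.
func exampleChatRequestJSON() ([]byte, error) {
	model := "command"
	temperature := 0.3
	request := &ChatRequest{
		Message:     "What is retrieval-augmented generation?",
		Model:       &model,
		Temperature: &temperature,
		Connectors: []*ChatConnector{
			{Id: "web-search", Options: map[string]interface{}{"site": "cohere.com"}},
		},
	}
	return json.Marshal(request) // encodes the fields above plus "stream": false
}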
type ChatStreamRequest struct {
// Accepts a string.
// The chat message from the user to the model.
Message string `json:"message"`
// Defaults to `command`.
// The identifier of the model, which can be one of the existing Cohere models or the full ID for a [finetuned custom model](/docs/training-custom-models).
// Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models).
Model *string `json:"model,omitempty"`
// When specified, the default Cohere preamble will be replaced with the provided one.
PreambleOverride *string `json:"preamble_override,omitempty"`
// A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`.
ChatHistory []*ChatMessage `json:"chat_history,omitempty"`
// An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation.
// If a conversation with this id does not already exist, a new conversation will be created.
ConversationId *string `json:"conversation_id,omitempty"`
// Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
// Dictates how the prompt will be constructed.
// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit.
// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
PromptTruncation *ChatStreamRequestPromptTruncation `json:"prompt_truncation,omitempty"`
// Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom connector, if you've made one.
// When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
Connectors []*ChatConnector `json:"connectors,omitempty"`
// Defaults to `false`.
// When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
SearchQueriesOnly *bool `json:"search_queries_only,omitempty"`
// A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
Documents []ChatDocument `json:"documents,omitempty"`
// Defaults to `"accurate"`.
// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
CitationQuality *ChatStreamRequestCitationQuality `json:"citation_quality,omitempty"`
// Defaults to `0.3`
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
Temperature *float64 `json:"temperature,omitempty"`
stream bool
}
func (c *ChatStreamRequest) Stream() bool {
return c.stream
}
func (c *ChatStreamRequest) UnmarshalJSON(data []byte) error {
type unmarshaler ChatStreamRequest
var body unmarshaler
if err := json.Unmarshal(data, &body); err != nil {
return err
}
*c = ChatStreamRequest(body)
c.stream = true
return nil
}
func (c *ChatStreamRequest) MarshalJSON() ([]byte, error) {
type embed ChatStreamRequest
var marshaler = struct {
embed
Stream bool `json:"stream"`
}{
embed: embed(*c),
Stream: true,
}
return json.Marshal(marshaler)
}
type ClassifyRequest struct {
// Represents a list of queries to be classified; each entry must not be empty. The maximum is 96 inputs.
Inputs []string `json:"inputs,omitempty"`
// An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`.
// Note: [Custom Models](/training-representation-models) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
Examples []*ClassifyRequestExamplesItem `json:"examples,omitempty"`
// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
Model *string `json:"model,omitempty"`
// The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.ai/playground/classify?model=large). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters.
Preset *string `json:"preset,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *ClassifyRequestTruncate `json:"truncate,omitempty"`
}
type CreateClusterJobRequest struct {
Required interface{} `json:"required,omitempty"`
EmbeddingsUrl *string `json:"embeddings_url,omitempty"`
InputDatasetId *string `json:"input_dataset_id,omitempty"`
// Defaults to `10`. Parameter for HDBSCAN. Only clusters with at least this number of elements will be returned with a positive cluster number.
MinClusterSize *int `json:"min_cluster_size,omitempty"`
// Parameter for UMAP. A scalar governing how to balance global vs local structure in the data.
NNeighbors *int `json:"n_neighbors,omitempty"`
// Parameter for UMAP. A boolean governing whether the embeddings from UMAP (that will be clustered with HDBSCAN) are deterministic.
IsDeterministic *bool `json:"is_deterministic,omitempty"`
GenerateDescriptions *bool `json:"generate_descriptions,omitempty"`
}
type DetectLanguageRequest struct {
// List of strings to run the detection on.
Texts []string `json:"texts,omitempty"`
// The identifier of the model to generate with.
Model *string `json:"model,omitempty"`
}
type DetokenizeRequest struct {
// The list of tokens to be detokenized.
Tokens []int `json:"tokens,omitempty"`
// An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
Model *string `json:"model,omitempty"`
}
type EmbedRequest struct {
// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
Texts []string `json:"texts,omitempty"`
// Defaults to `embed-english-v2.0`.
//
// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
//
// Available models and corresponding embedding dimensions:
//
// * `embed-english-v3.0` 1024
// * `embed-multilingual-v3.0` 1024
// * `embed-english-light-v3.0` 384
// * `embed-multilingual-light-v3.0` 384
//
// * `embed-english-v2.0` 4096
// * `embed-english-light-v2.0` 1024
// * `embed-multilingual-v2.0` 768
Model *string `json:"model,omitempty"`
// Specifies the type of input you're giving to the model. Not required for older versions of the embedding models (anything before v3), but required for v3 and newer models.
//
// * `"search_document"`: Use this when you encode documents for embeddings that you store in a vector database for search use-cases.
// * `"search_query"`: Use this when you query your vector DB to find relevant documents.
// * `"classification"`: Use this when you use the embeddings as an input to a text classifier.
// * `"clustering"`: Use this when you want to cluster the embeddings.
InputType *string `json:"input_type,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *EmbedRequestTruncate `json:"truncate,omitempty"`
}
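// Example (illustrative sketch): an EmbedRequest for a v3 model. As documented above,
// `input_type` is required for v3 models; older v2 models may omit it.
func exampleEmbedRequest() *EmbedRequest {
	model := "embed-english-v3.0"
	inputType := "search_document"
	return &EmbedRequest{
		Texts:     []string{"hello", "goodbye"},
		Model:     &model,
		InputType: &inputType,
	}
}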
type GenerateRequest struct {
// The input text that serves as the starting point for generating the response.
// Note: The prompt will be pre-processed and modified before reaching the model.
Prompt string `json:"prompt"`
// The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental).
// Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
Model *string `json:"model,omitempty"`
// The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
NumGenerations *int `json:"num_generations,omitempty"`
// When `true`, the response will be a JSON stream of events. Streaming is beneficial for user interfaces that render the contents of the response piece by piece, as it gets generated.
//
// The final event will contain the complete response, and will contain an `is_finished` field set to `true`. The event will also contain a `finish_reason`, which can be one of the following:
// - `COMPLETE` - the model sent back a finished reply
// - `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens for its context length
// - `ERROR` - something went wrong when generating the reply
// - `ERROR_TOXIC` - the model generated a reply that was deemed toxic
Stream *bool `json:"stream,omitempty"`
// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
//
// This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
//
// Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt.
MaxTokens *int `json:"max_tokens,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *GenerateRequestTruncate `json:"truncate,omitempty"`
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
// Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
Temperature *float64 `json:"temperature,omitempty"`
// Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
// When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
Preset *string `json:"preset,omitempty"`
// The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
EndSequences []string `json:"end_sequences,omitempty"`
// The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
StopSequences []string `json:"stop_sequences,omitempty"`
// Ensures only the top `k` most likely tokens are considered for generation at each step.
// Defaults to `0`, min value of `0`, max value of `500`.
K *int `json:"k,omitempty"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0`, min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty"`
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
PresencePenalty *float64 `json:"presence_penalty,omitempty"`
// One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`.
//
// If `GENERATION` is selected, the token likelihoods will only be provided for generated text.
//
// If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text.
ReturnLikelihoods *GenerateRequestReturnLikelihoods `json:"return_likelihoods,omitempty"`
// Used to prevent the model from generating unwanted tokens or to incentivize it to include desired tokens. The format is `{token_id: bias}` where bias is a float between -10 and 10. Tokens can be obtained from text using [Tokenize](/reference/tokenize).
//
// For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text.
//
// Note: logit bias may not be supported for all custom models.
LogitBias map[string]float64 `json:"logit_bias,omitempty"`
}
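// Example (illustrative sketch): a GenerateRequest that discourages newlines via
// `logit_bias` and restricts sampling with `k`. Token id 11 ("\n") is taken from the
// comment above; use the Tokenize endpoint to look up ids for your own model.
func exampleGenerateRequest() *GenerateRequest {
	maxTokens := 50
	k := 40
	return &GenerateRequest{
		Prompt:    "Write a haiku about the sea",
		MaxTokens: &maxTokens,
		K:         &k,
		LogitBias: map[string]float64{"11": -10},
	}
}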
type LoglikelihoodRequest struct {
// The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
Model *string `json:"model,omitempty"`
// To be used in conjunction with `completion`. This will be interpolated into the user message in the prompt template.
Prompt *string `json:"prompt,omitempty"`
// To be used in conjunction with `prompt`. This will be interpolated into the chatbot response in the prompt template.
Completion *string `json:"completion,omitempty"`
// To be used on its own, this allows you to pass a custom prompt to the model that will not be interpolated into a prompt template.
RawPrompt *string `json:"raw_prompt,omitempty"`
}
type RerankRequest struct {
// The identifier of the model to use, one of: `rerank-english-v2.0`, `rerank-multilingual-v2.0`
Model *string `json:"model,omitempty"`
// The search query
Query string `json:"query"`
// A list of document objects or strings to rerank.
// If a document is provided, the `text` field is required and all other fields will be preserved in the response.
//
// The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000.
//
// We recommend a maximum of 1,000 documents for optimal endpoint performance.
Documents []*RerankRequestDocumentsItem `json:"documents,omitempty"`
// The number of most relevant documents or indices to return; defaults to the length of the documents
TopN *int `json:"top_n,omitempty"`
// - If false, returns results without the doc text: the API will return a list of {index, relevance score} where index is inferred from the list passed into the request.
// - If true, returns results with the doc text passed in: the API will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request.
ReturnDocuments *bool `json:"return_documents,omitempty"`
// The maximum number of chunks to produce internally from a document
MaxChunksPerDoc *int `json:"max_chunks_per_doc,omitempty"`
}
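// Example (illustrative sketch): the limits documented above combine as
// len(documents) * max_chunks_per_doc, which must stay below 10000. This helper
// checks a request before it is sent; the fallback of 10 chunks per document when
// the field is unset is an assumption, not taken from this file.
func exampleRerankWithinChunkLimit(r *RerankRequest) bool {
	maxChunks := 10 // assumed default when MaxChunksPerDoc is nil
	if r.MaxChunksPerDoc != nil {
		maxChunks = *r.MaxChunksPerDoc
	}
	return len(r.Documents)*maxChunks < 10000
}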
type SummarizeRequest struct {
// The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English.
Text string `json:"text"`
// One of `short`, `medium`, `long`, or `auto`; defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text.
Length *SummarizeRequestLength `json:"length,omitempty"`
// One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text.
Format *SummarizeRequestFormat `json:"format,omitempty"`
// The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better.
Model *string `json:"model,omitempty"`
// One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text.
Extractiveness *SummarizeRequestExtractiveness `json:"extractiveness,omitempty"`
// Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1.
Temperature *float64 `json:"temperature,omitempty"`
// A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". E.g. "focusing on the next steps" or "written by Yoda"
AdditionalCommand *string `json:"additional_command,omitempty"`
}
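// Example (illustrative sketch): a SummarizeRequest that leaves length, format and
// extractiveness at their `auto` defaults and supplies only the required text plus a
// free-form additional command.
func exampleSummarizeRequest() *SummarizeRequest {
	command := "focusing on the next steps"
	return &SummarizeRequest{
		Text:              "The meeting covered the quarterly roadmap and open hiring plans.",
		AdditionalCommand: &command,
	}
}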
type TokenizeRequest struct {
// The string to be tokenized; the minimum text length is 1 character, and the maximum text length is 65536 characters.
Text string `json:"text"`
// An optional parameter to provide the model name. This will ensure that the tokenization uses the tokenizer used by that model.
Model *string `json:"model,omitempty"`
}
type ApiMeta struct {
ApiVersion *ApiMetaApiVersion `json:"api_version,omitempty"`
Warnings []string `json:"warnings,omitempty"`
_rawJSON json.RawMessage
}
func (a *ApiMeta) UnmarshalJSON(data []byte) error {
type unmarshaler ApiMeta
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*a = ApiMeta(value)
a._rawJSON = json.RawMessage(data)
return nil
}
func (a *ApiMeta) String() string {
if len(a._rawJSON) > 0 {
if value, err := core.StringifyJSON(a._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(a); err == nil {
return value
}
return fmt.Sprintf("%#v", a)
}
type ApiMetaApiVersion struct {
Version string `json:"version"`
IsDeprecated *bool `json:"is_deprecated,omitempty"`
IsExperimental *bool `json:"is_experimental,omitempty"`
BilledUnits *ApiMetaApiVersionBilledUnits `json:"billed_units,omitempty"`
_rawJSON json.RawMessage
}
func (a *ApiMetaApiVersion) UnmarshalJSON(data []byte) error {
type unmarshaler ApiMetaApiVersion
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*a = ApiMetaApiVersion(value)
a._rawJSON = json.RawMessage(data)
return nil
}
func (a *ApiMetaApiVersion) String() string {
if len(a._rawJSON) > 0 {
if value, err := core.StringifyJSON(a._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(a); err == nil {
return value
}
return fmt.Sprintf("%#v", a)
}
type ApiMetaApiVersionBilledUnits struct {
// The number of billed input tokens.
InputTokens *float64 `json:"input_tokens,omitempty"`
// The number of billed output tokens.
OutputTokens *float64 `json:"output_tokens,omitempty"`
// The number of billed search units.
SearchUnits *float64 `json:"search_units,omitempty"`
// The number of billed classification units.
Classifications *float64 `json:"classifications,omitempty"`
_rawJSON json.RawMessage
}
func (a *ApiMetaApiVersionBilledUnits) UnmarshalJSON(data []byte) error {
type unmarshaler ApiMetaApiVersionBilledUnits
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*a = ApiMetaApiVersionBilledUnits(value)
a._rawJSON = json.RawMessage(data)
return nil
}
func (a *ApiMetaApiVersionBilledUnits) String() string {
if len(a._rawJSON) > 0 {
if value, err := core.StringifyJSON(a._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(a); err == nil {
return value
}
return fmt.Sprintf("%#v", a)
}
// A section of the generated reply which cites external knowledge.
type ChatCitation struct {
// The index of text that the citation starts at, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have a start value of `7`. This is because the citation starts at `w`, which is the seventh character.
Start int `json:"start"`
// The index of text that the citation ends after, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have an end value of `11`. This is because the citation ends after `d`, which is the eleventh character.
End int `json:"end"`
// The text of the citation. For example, a generation of `Hello, world!` with a citation of `world` would have a text value of `world`.
Text string `json:"text"`
// Identifiers of documents cited by this section of the generated reply.
DocumentIds []string `json:"document_ids,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatCitation) UnmarshalJSON(data []byte) error {
type unmarshaler ChatCitation
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatCitation(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatCitation) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
type ChatCitationGenerationEvent struct {
// Citations for the generated reply.
Citations []*ChatCitation `json:"citations,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatCitationGenerationEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatCitationGenerationEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatCitationGenerationEvent(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatCitationGenerationEvent) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// The connector used for fetching documents.
type ChatConnector struct {
// The identifier of the connector. Currently only 'web-search' is supported.
Id string `json:"id"`
// Provides the connector with different settings at request time. The key/value pairs of this object are specific to each connector.
//
// The supported options are:
//
// **web-search**
//
// **site** - The web search results will be restricted to this domain (and TLD) when specified. Only a single domain may be specified; subdomains are also accepted.
// Examples:
// * `{"options": {"site": "cohere.com"}}` would restrict the results to all subdomains at cohere.com
// * `{"options": {"site": "txt.cohere.com"}}` would restrict the results to `txt.cohere.com`
Options map[string]interface{} `json:"options,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatConnector) UnmarshalJSON(data []byte) error {
type unmarshaler ChatConnector
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatConnector(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatConnector) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// Relevant information that could be used by the model to generate a more accurate reply.
// The contents of each document are generally short (under 300 words), and are passed in the form of a
// dictionary of strings. Some suggested keys are "text", "author", "date". Both the key name and the value will be
// passed to the model.
type ChatDocument = map[string]string
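// Example (illustrative sketch): because ChatDocument is a plain string map, documents
// for the chat endpoint are built directly; both the key names and the values are
// passed to the model, as described above.
var exampleChatDocuments = []ChatDocument{
	{
		"title": "Penguin facts",
		"text":  "Emperor penguins are the tallest living penguin species.",
	},
}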
// A single message in a chat history. Contains the role of the sender, the text contents of the message, and optionally a username.
type ChatMessage struct {
Role ChatMessageRole `json:"role,omitempty"`
Message string `json:"message"`
UserName *string `json:"user_name,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatMessage) UnmarshalJSON(data []byte) error {
type unmarshaler ChatMessage
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatMessage(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatMessage) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
type ChatMessageRole string
const (
ChatMessageRoleChatbot ChatMessageRole = "CHATBOT"
ChatMessageRoleUser ChatMessageRole = "USER"
)
func NewChatMessageRoleFromString(s string) (ChatMessageRole, error) {
switch s {
case "CHATBOT":
return ChatMessageRoleChatbot, nil
case "USER":
return ChatMessageRoleUser, nil
}
var t ChatMessageRole
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatMessageRole) Ptr() *ChatMessageRole {
return &c
}
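// Example (illustrative sketch): parsing a role string and building chat history for
// the `chat_history` field of a ChatRequest. NewChatMessageRoleFromString rejects any
// value other than "CHATBOT" or "USER".
func exampleChatHistory() ([]*ChatMessage, error) {
	role, err := NewChatMessageRoleFromString("USER")
	if err != nil {
		return nil, err
	}
	return []*ChatMessage{
		{Role: role, Message: "Hi, my name is Ada."},
		{Role: ChatMessageRoleChatbot, Message: "Nice to meet you, Ada."},
	}, nil
}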
// Defaults to `"accurate"`.
// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
type ChatRequestCitationQuality string
const (
ChatRequestCitationQualityFast ChatRequestCitationQuality = "fast"
ChatRequestCitationQualityAccurate ChatRequestCitationQuality = "accurate"
)
func NewChatRequestCitationQualityFromString(s string) (ChatRequestCitationQuality, error) {
switch s {
case "fast":
return ChatRequestCitationQualityFast, nil
case "accurate":
return ChatRequestCitationQualityAccurate, nil
}
var t ChatRequestCitationQuality
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatRequestCitationQuality) Ptr() *ChatRequestCitationQuality {
return &c
}
// Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
// Dictates how the prompt will be constructed.
// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit.
// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
type ChatRequestPromptTruncation string
const (
ChatRequestPromptTruncationOff ChatRequestPromptTruncation = "OFF"
ChatRequestPromptTruncationAuto ChatRequestPromptTruncation = "AUTO"
)
func NewChatRequestPromptTruncationFromString(s string) (ChatRequestPromptTruncation, error) {
switch s {
case "OFF":
return ChatRequestPromptTruncationOff, nil
case "AUTO":
return ChatRequestPromptTruncationAuto, nil
}
var t ChatRequestPromptTruncation
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatRequestPromptTruncation) Ptr() *ChatRequestPromptTruncation {
return &c
}
type ChatSearchQueriesGenerationEvent struct {
// Generated search queries, meant to be used as part of the RAG flow.
SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatSearchQueriesGenerationEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatSearchQueriesGenerationEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatSearchQueriesGenerationEvent(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatSearchQueriesGenerationEvent) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// The generated search query. Contains the text of the query and a unique identifier for the query.
type ChatSearchQuery struct {
// The text of the search query.
Text string `json:"text"`
// Unique identifier for the generated search query. Useful for submitting feedback.
GenerationId string `json:"generation_id"`
_rawJSON json.RawMessage
}
func (c *ChatSearchQuery) UnmarshalJSON(data []byte) error {
type unmarshaler ChatSearchQuery
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatSearchQuery(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatSearchQuery) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
type ChatSearchResult struct {
SearchQuery *ChatSearchQuery `json:"search_query,omitempty"`
// The connector from which this result comes.
Connector *ChatConnector `json:"connector,omitempty"`
// Identifiers of documents found by this search query.
DocumentIds []string `json:"document_ids,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatSearchResult) UnmarshalJSON(data []byte) error {
type unmarshaler ChatSearchResult
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatSearchResult(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatSearchResult) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
type ChatSearchResultsEvent struct {
// Conducted searches and the IDs of documents retrieved from each of them.
SearchResults []*ChatSearchResult `json:"search_results,omitempty"`
// Documents fetched from searches or provided by the user.
Documents []ChatDocument `json:"documents,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatSearchResultsEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatSearchResultsEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatSearchResultsEvent(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatSearchResultsEvent) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
type ChatStreamEndEvent struct {
// - `COMPLETE` - the model sent back a finished reply
// - `ERROR_LIMIT` - the reply was cut off because the model reached the maximum number of tokens for its context length
// - `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens specified by the max_tokens parameter
// - `ERROR` - something went wrong when generating the reply
// - `ERROR_TOXIC` - the model generated a reply that was deemed toxic
FinishReason ChatStreamEndEventFinishReason `json:"finish_reason,omitempty"`
// The consolidated response from the model. Contains the generated reply and all the other information streamed back in the previous events.
Response *ChatStreamEndEventResponse `json:"response,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatStreamEndEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatStreamEndEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatStreamEndEvent(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatStreamEndEvent) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// - `COMPLETE` - the model sent back a finished reply
// - `ERROR_LIMIT` - the reply was cut off because the model reached the maximum number of tokens for its context length
// - `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens specified by the max_tokens parameter
// - `ERROR` - something went wrong when generating the reply
// - `ERROR_TOXIC` - the model generated a reply that was deemed toxic
type ChatStreamEndEventFinishReason string
const (
ChatStreamEndEventFinishReasonComplete ChatStreamEndEventFinishReason = "COMPLETE"
ChatStreamEndEventFinishReasonErrorLimit ChatStreamEndEventFinishReason = "ERROR_LIMIT"
ChatStreamEndEventFinishReasonMaxTokens ChatStreamEndEventFinishReason = "MAX_TOKENS"
ChatStreamEndEventFinishReasonError ChatStreamEndEventFinishReason = "ERROR"
ChatStreamEndEventFinishReasonErrorToxic ChatStreamEndEventFinishReason = "ERROR_TOXIC"
)
func NewChatStreamEndEventFinishReasonFromString(s string) (ChatStreamEndEventFinishReason, error) {
switch s {
case "COMPLETE":
return ChatStreamEndEventFinishReasonComplete, nil
case "ERROR_LIMIT":
return ChatStreamEndEventFinishReasonErrorLimit, nil
case "MAX_TOKENS":
return ChatStreamEndEventFinishReasonMaxTokens, nil
case "ERROR":
return ChatStreamEndEventFinishReasonError, nil
case "ERROR_TOXIC":
return ChatStreamEndEventFinishReasonErrorToxic, nil
}
var t ChatStreamEndEventFinishReason
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatStreamEndEventFinishReason) Ptr() *ChatStreamEndEventFinishReason {
return &c
}
// The consolidated response from the model. Contains the generated reply and all the other information streamed back in the previous events.
type ChatStreamEndEventResponse struct {
typeName string
NonStreamedChatResponse *NonStreamedChatResponse
SearchQueriesOnlyResponse *SearchQueriesOnlyResponse
}
func NewChatStreamEndEventResponseFromNonStreamedChatResponse(value *NonStreamedChatResponse) *ChatStreamEndEventResponse {
return &ChatStreamEndEventResponse{typeName: "nonStreamedChatResponse", NonStreamedChatResponse: value}
}
func NewChatStreamEndEventResponseFromSearchQueriesOnlyResponse(value *SearchQueriesOnlyResponse) *ChatStreamEndEventResponse {
return &ChatStreamEndEventResponse{typeName: "searchQueriesOnlyResponse", SearchQueriesOnlyResponse: value}
}
func (c *ChatStreamEndEventResponse) UnmarshalJSON(data []byte) error {
valueNonStreamedChatResponse := new(NonStreamedChatResponse)
if err := json.Unmarshal(data, &valueNonStreamedChatResponse); err == nil {
c.typeName = "nonStreamedChatResponse"
c.NonStreamedChatResponse = valueNonStreamedChatResponse
return nil
}
valueSearchQueriesOnlyResponse := new(SearchQueriesOnlyResponse)
if err := json.Unmarshal(data, &valueSearchQueriesOnlyResponse); err == nil {
c.typeName = "searchQueriesOnlyResponse"
c.SearchQueriesOnlyResponse = valueSearchQueriesOnlyResponse
return nil
}
return fmt.Errorf("%s cannot be deserialized as a %T", data, c)
}
func (c ChatStreamEndEventResponse) MarshalJSON() ([]byte, error) {
switch c.typeName {
default:
return nil, fmt.Errorf("invalid type %s in %T", c.typeName, c)
case "nonStreamedChatResponse":
return json.Marshal(c.NonStreamedChatResponse)
case "searchQueriesOnlyResponse":
return json.Marshal(c.SearchQueriesOnlyResponse)
}
}
type ChatStreamEndEventResponseVisitor interface {
VisitNonStreamedChatResponse(*NonStreamedChatResponse) error
VisitSearchQueriesOnlyResponse(*SearchQueriesOnlyResponse) error
}
func (c *ChatStreamEndEventResponse) Accept(visitor ChatStreamEndEventResponseVisitor) error {
switch c.typeName {
default:
return fmt.Errorf("invalid type %s in %T", c.typeName, c)
case "nonStreamedChatResponse":
return visitor.VisitNonStreamedChatResponse(c.NonStreamedChatResponse)
case "searchQueriesOnlyResponse":
return visitor.VisitSearchQueriesOnlyResponse(c.SearchQueriesOnlyResponse)
}
}
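// Example (illustrative sketch): a ChatStreamEndEventResponseVisitor that pulls the
// reply text out of the consolidated end-of-stream response. The Text field on
// NonStreamedChatResponse is assumed from that type's definition elsewhere in this file.
type exampleEndEventVisitor struct {
	text string
}

func (v *exampleEndEventVisitor) VisitNonStreamedChatResponse(r *NonStreamedChatResponse) error {
	v.text = r.Text
	return nil
}

func (v *exampleEndEventVisitor) VisitSearchQueriesOnlyResponse(r *SearchQueriesOnlyResponse) error {
	// A search-queries-only response carries no reply text.
	return nil
}

// Usage: visitor := &exampleEndEventVisitor{}; err := endEvent.Response.Accept(visitor); visitor.text then holds the reply.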
type ChatStreamEvent struct {
_rawJSON json.RawMessage
}
func (c *ChatStreamEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatStreamEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatStreamEvent(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatStreamEvent) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// Defaults to `"accurate"`.
// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
type ChatStreamRequestCitationQuality string
const (
ChatStreamRequestCitationQualityFast ChatStreamRequestCitationQuality = "fast"
ChatStreamRequestCitationQualityAccurate ChatStreamRequestCitationQuality = "accurate"
)
func NewChatStreamRequestCitationQualityFromString(s string) (ChatStreamRequestCitationQuality, error) {
switch s {
case "fast":
return ChatStreamRequestCitationQualityFast, nil
case "accurate":
return ChatStreamRequestCitationQualityAccurate, nil
}
var t ChatStreamRequestCitationQuality
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatStreamRequestCitationQuality) Ptr() *ChatStreamRequestCitationQuality {
return &c
}
// Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
// Dictates how the prompt will be constructed.
// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit.
// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
type ChatStreamRequestPromptTruncation string
const (
ChatStreamRequestPromptTruncationOff ChatStreamRequestPromptTruncation = "OFF"
ChatStreamRequestPromptTruncationAuto ChatStreamRequestPromptTruncation = "AUTO"
)
func NewChatStreamRequestPromptTruncationFromString(s string) (ChatStreamRequestPromptTruncation, error) {
switch s {
case "OFF":
return ChatStreamRequestPromptTruncationOff, nil
case "AUTO":
return ChatStreamRequestPromptTruncationAuto, nil
}
var t ChatStreamRequestPromptTruncation
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatStreamRequestPromptTruncation) Ptr() *ChatStreamRequestPromptTruncation {
return &c
}
type ChatStreamStartEvent struct {
// Unique identifier for the generated reply. Useful for submitting feedback.
GenerationId string `json:"generation_id"`
_rawJSON json.RawMessage
}
func (c *ChatStreamStartEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatStreamStartEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatStreamStartEvent(value)